From 7fa5fe946b96baa2d49164024beeca20fa5fa050 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Thu, 22 Jan 2009 00:21:16 +0000
Subject: [PATCH] HBASE-1064 HBase REST xml/json improvements

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@736503 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt | 4 +-
 NOTICE.txt | 34 ++
 lib/AgileJSON-2.0.jar | Bin 0 -> 67533 bytes
 .../hadoop/hbase/HColumnDescriptor.java | 23 +-
 .../apache/hadoop/hbase/HTableDescriptor.java | 20 +-
 .../apache/hadoop/hbase/RegionHistorian.java | 2 +-
 src/java/org/apache/hadoop/hbase/io/Cell.java | 105 ++--
 .../org/apache/hadoop/hbase/io/RowResult.java | 34 +-
 .../regionserver/CompactSplitThread.java | 2 +-
 .../hadoop/hbase/rest/AbstractController.java | 72 +++
 .../hadoop/hbase/rest/AbstractModel.java | 99 ++++
 .../hadoop/hbase/rest/DatabaseController.java | 84 ++++
 .../hadoop/hbase/rest/DatabaseModel.java | 85 ++++
 .../apache/hadoop/hbase/rest/Dispatcher.java | 466 +++++++++++++-----
 .../hadoop/hbase/rest/GenericHandler.java | 342 -------------
 .../apache/hadoop/hbase/rest/MetaHandler.java | 108 ----
 .../hadoop/hbase/rest/RESTConstants.java | 111 +++++
 .../hadoop/hbase/rest/RowController.java | 135 +++++
 .../apache/hadoop/hbase/rest/RowHandler.java | 346 -------------
 .../apache/hadoop/hbase/rest/RowModel.java | 140 ++++++
 .../hadoop/hbase/rest/ScannerController.java | 358 ++++++++++++++
 .../hadoop/hbase/rest/ScannerHandler.java | 339 -------------
 .../hadoop/hbase/rest/ScannerModel.java | 282 +++++++++++
 .../org/apache/hadoop/hbase/rest/Status.java | 256 ++++++++++
 .../hadoop/hbase/rest/TableController.java | 170 +++++++
 .../hadoop/hbase/rest/TableHandler.java | 416 ----------------
 .../apache/hadoop/hbase/rest/TableModel.java | 280 +++++++++++
 .../hbase/rest/TimestampController.java | 139 ++++++
 .../hadoop/hbase/rest/TimestampModel.java | 126 +++++
 .../hbase/rest/descriptors/RestCell.java | 103 ++++
 .../rest/descriptors/RowUpdateDescriptor.java | 74 +++
 .../rest/descriptors/ScannerDescriptor.java | 130 +++++
 .../rest/descriptors/ScannerIdentifier.java | 96 ++++
 .../descriptors/TimestampsDescriptor.java | 67 +++
 .../rest/exception/HBaseRestException.java | 86 ++++
 .../rest/filter/ColumnValueFilterFactory.java | 66 +++
 .../hbase/rest/filter/FilterFactory.java | 71 +++
 .../rest/filter/FilterFactoryConstants.java | 41 ++
 .../filter/InclusiveStopRowFilterFactory.java | 37 ++
 .../rest/filter/PageRowFilterFactory.java | 34 ++
 .../rest/filter/RegExpRowFilterFactory.java | 34 ++
 .../rest/filter/RowFilterSetFactory.java | 115 +++++
 .../rest/filter/StopRowFilterFactory.java | 37 ++
 .../filter/WhileMatchRowFilterFactory.java | 61 +++
 .../rest/parser/HBaseRestParserFactory.java | 56 +++
 .../hbase/rest/parser/IHBaseRestParser.java | 52 ++
 .../hbase/rest/parser/JsonRestParser.java | 235 +++++++++
 .../hbase/rest/parser/XMLRestParser.java | 291 +++++++++++
 .../serializer/AbstractRestSerializer.java | 58 +++
 .../rest/serializer/IRestSerializer.java | 173 +++++++
 .../hbase/rest/serializer/ISerializable.java | 42 ++
 .../hbase/rest/serializer/JSONSerializer.java | 213 ++++++++
 .../serializer/RestSerializerFactory.java | 56 +++
 .../rest/serializer/SimpleXMLSerializer.java | 464 +++++++++++++++++
 src/webapps/rest/WEB-INF/web.xml | 6 +-
 55 files changed, 5545 insertions(+), 1731 deletions(-)
 create mode 100644 lib/AgileJSON-2.0.jar
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/AbstractController.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/AbstractModel.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/DatabaseController.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/RESTConstants.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/RowController.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/RowModel.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/ScannerController.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/ScannerModel.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/Status.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/TableController.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/TableModel.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/TimestampController.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/TimestampModel.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java
 create mode 100644 src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java

diff --git a/CHANGES.txt b/CHANGES.txt
index 638d69cf6ef9..e3e75e515095 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -23,8 +23,10 @@ Release 0.20.0 - Unreleased
    HBASE-1031  Add the Zookeeper jar
    HBASE-1142  Cleanup thrift server; remove Text and profuse DEBUG messaging
                (Tim Sell via Stack)
+   HBASE-1064  HBase REST xml/json improvements (Brian Beggs working off of
+               initial Michael Gottesman work via Stack)
 
-Release 0.19.0 - Unreleased
+Release 0.19.0 - 01/21/2009
   INCOMPATIBLE CHANGES
   HBASE-885   TableMap and TableReduce should be interfaces
               (Doğacan Güney via Stack)
diff --git a/NOTICE.txt b/NOTICE.txt
index 40a24e9789d7..4fb7d7469353 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -3,8 +3,42 @@ Foundation (http://www.apache.org/).
 
 In addition, this product includes software developed by:
 
+European Commission project OneLab (http://www.one-lab.org)
+Facebook, Inc. (http://developers.facebook.com/thrift/ -- Page includes the Thrift Software License)
+JUnit (http://www.junit.org/)
+
+
+Michael Gottesman developed AgileJSON.  Its source code is here:
+
+  http://github.com/gottesmm/agile-json-2.0/tree/master
+
+It has this license at the head of each source file:
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal
+ * in the Software without restriction, including without limitation the
+ * rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all
+ * copies or substantial portions of the Software.
+ *
+ * The Software shall be used for Good, not Evil.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
diff --git a/lib/AgileJSON-2.0.jar b/lib/AgileJSON-2.0.jar
new file mode 100644
index 0000000000000000000000000000000000000000..906161a9cf7cdcc3b17599fc161a063c1eee9b77
GIT binary patch
literal 67533
[base85-encoded binary payload of lib/AgileJSON-2.0.jar (67533 bytes) omitted]
diff --git a/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+public class HColumnDescriptor implements ISerializable, WritableComparable {
   // For future backward compatibility
   // Version 3 was when column names become byte arrays and when we picked up
@@ -257,6 +262,7 @@ public HColumnDescriptor(final byte [] familyName, final int maxVersions,
   /**
    * @return Name of this column family with colon as required by client API
    */
+  @TOJSON(fieldName = "name", base64=true)
   public byte [] getNameWithColon() {
     return HStoreKey.addDelimiter(this.name);
   }
@@ -315,6 +321,7 @@ public void setValue(String key, String value) {
   }
 
   /** @return compression type being used for the column family */
+  @TOJSON
   public CompressionType getCompression() {
     String value = getValue(COMPRESSION);
     if (value != null) {
@@ -327,6 +334,7 @@ else if (value.equalsIgnoreCase("RECORD"))
   }
 
   /** @return maximum number of versions */
+  @TOJSON
   public int getMaxVersions() {
     String value = getValue(HConstants.VERSIONS);
     if (value != null)
@@ -344,6 +352,7 @@ public void setMaxVersions(int maxVersions) {
   /**
    * @return Compression type setting.
    */
+  @TOJSON
   public CompressionType getCompressionType() {
     return getCompression();
   }
@@ -364,6 +373,7 @@ public void setCompressionType(CompressionType type) {
   /**
    * @return True if we are to keep all in use HRegionServer cache.
    */
+  @TOJSON(prefixLength = 2)
   public boolean isInMemory() {
     String value = getValue(HConstants.IN_MEMORY);
     if (value != null)
@@ -382,6 +392,7 @@ public void setInMemory(boolean inMemory) {
   /**
    * @return Maximum value length.
    */
+  @TOJSON
   public synchronized int getMaxValueLength() {
     if (this.maxValueLength == null) {
       String value = getValue(LENGTH);
@@ -402,6 +413,7 @@ public void setMaxValueLength(int maxLength) {
   /**
    * @return Time-to-live of cell contents, in seconds.
    */
+  @TOJSON
   public int getTimeToLive() {
     String value = getValue(TTL);
     return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL;
@@ -417,6 +429,7 @@ public void setTimeToLive(int timeToLive) {
   /**
    * @return True if MapFile blocks should be cached.
*/ + @TOJSON(prefixLength = 2) public boolean isBlockCacheEnabled() { String value = getValue(BLOCKCACHE); if (value != null) @@ -434,6 +447,7 @@ public void setBlockCacheEnabled(boolean blockCacheEnabled) { /** * @return true if a bloom filter is enabled */ + @TOJSON(prefixLength = 2) public boolean isBloomfilter() { String value = getValue(BLOOMFILTER); if (value != null) @@ -577,4 +591,11 @@ else if (result > 0) } return result; } + + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) throws HBaseRestException { + serializer.serializeColumnDescriptor(this); + } } diff --git a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java index 17505586d145..fa430a12871e 100644 --- a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -31,14 +31,19 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableComparable; +import agilejson.TOJSON; + /** * HTableDescriptor contains the name of an HTable, and its * column families. */ -public class HTableDescriptor implements WritableComparable { +public class HTableDescriptor implements WritableComparable, ISerializable { // Changes prior to version 3 were not recorded here. // Version 3 adds metadata as a map where keys and values are byte[]. 
@@ -383,6 +388,7 @@ public void setReadOnly(final boolean readOnly) { } /** @return name of table */ + @TOJSON public byte [] getName() { return name; } @@ -621,6 +627,11 @@ else if (result > 0) public Collection getFamilies() { return Collections.unmodifiableCollection(this.families.values()); } + + @TOJSON(fieldName = "columns") + public HColumnDescriptor[] getColumnFamilies() { + return getFamilies().toArray(new HColumnDescriptor[0]); + } /** * @param column @@ -667,4 +678,11 @@ public static Path getTableDir(Path rootdir, final byte [] tableName) { new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN, HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE, false, false, Integer.MAX_VALUE, HConstants.WEEK_IN_SECONDS, false)}); + + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) throws HBaseRestException { + serializer.serializeTableDescriptor(this); + } } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/RegionHistorian.java b/src/java/org/apache/hadoop/hbase/RegionHistorian.java index 90d5a4e729ec..df08ce74d092 100644 --- a/src/java/org/apache/hadoop/hbase/RegionHistorian.java +++ b/src/java/org/apache/hadoop/hbase/RegionHistorian.java @@ -328,4 +328,4 @@ public void offline() { LOG.debug("Offlined"); } } -} +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/io/Cell.java b/src/java/org/apache/hadoop/hbase/io/Cell.java index 76b9b474fc46..4eb831187efc 100644 --- a/src/java/org/apache/hadoop/hbase/io/Cell.java +++ b/src/java/org/apache/hadoop/hbase/io/Cell.java @@ -29,28 +29,36 @@ import java.util.TreeMap; import java.util.Map.Entry; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; +import agilejson.TOJSON; + /** - * Cell - Used to transport a cell value (byte[]) and the timestamp it was + * Cell - Used to transport a cell value (byte[]) and the timestamp it was * stored with together as a result for get and getRow methods. This promotes - * the timestamp of a cell to a first-class value, making it easy to take - * note of temporal data. Cell is used all the way from HStore up to HTable. + * the timestamp of a cell to a first-class value, making it easy to take note + * of temporal data. Cell is used all the way from HStore up to HTable. */ -public class Cell implements Writable, Iterable> { - protected final SortedMap valueMap = - new TreeMap(new Comparator() { - public int compare(Long l1, Long l2) { - return l2.compareTo(l1); - }}); - +public class Cell implements Writable, Iterable>, + ISerializable { + protected final SortedMap valueMap = new TreeMap( + new Comparator() { + public int compare(Long l1, Long l2) { + return l2.compareTo(l1); + } + }); + /** For Writable compatibility */ public Cell() { } /** * Create a new Cell with a given value and timestamp. Used by HStore. + * * @param value * @param timestamp */ @@ -60,24 +68,29 @@ public Cell(String value, long timestamp) { /** * Create a new Cell with a given value and timestamp. Used by HStore. 
+ * * @param value * @param timestamp */ public Cell(byte[] value, long timestamp) { valueMap.put(timestamp, value); } - + /** - * @param vals array of values - * @param ts array of timestamps + * @param vals + * array of values + * @param ts + * array of timestamps */ public Cell(String[] vals, long[] ts) { this(Bytes.toByteArrays(vals), ts); } - + /** - * @param vals array of values - * @param ts array of timestamps + * @param vals + * array of values + * @param ts + * array of timestamps */ public Cell(byte[][] vals, long[] ts) { if (vals.length != ts.length) { @@ -88,42 +101,51 @@ public Cell(byte[][] vals, long[] ts) { valueMap.put(ts[i], vals[i]); } } - + /** @return the current cell's value */ + @TOJSON(base64=true) public byte[] getValue() { return valueMap.get(valueMap.firstKey()); } - + /** @return the current cell's timestamp */ + @TOJSON public long getTimestamp() { return valueMap.firstKey(); } - + /** @return the number of values this cell holds */ public int getNumValues() { return valueMap.size(); } - - /** Add values and timestamps of another cell into this cell - * @param c Cell + + /** + * Add values and timestamps of another cell into this cell + * + * @param c + * Cell */ public void mergeCell(Cell c) { valueMap.putAll(c.valueMap); } - - /** Add a new timestamp and value to this cell - * @param val value - * @param ts timestamp + + /** + * Add a new timestamp and value to this cell + * + * @param val + * value + * @param ts + * timestamp */ public void add(byte[] val, long ts) { valueMap.put(ts, val); } - + @Override public String toString() { if (valueMap.size() == 1) { - return "timestamp=" + getTimestamp() + ", value=" + - Bytes.toString(getValue()); + return "timestamp=" + getTimestamp() + ", value=" + + Bytes.toString(getValue()); } StringBuilder s = new StringBuilder("{ "); int i = 0; @@ -141,7 +163,7 @@ public String toString() { s.append(" }"); return s.toString(); } - + // // Writable // @@ -162,7 +184,7 @@ public void write(final DataOutput out) throws IOException { Bytes.writeByteArray(out, entry.getValue()); } } - + // // Iterable // @@ -170,23 +192,36 @@ public void write(final DataOutput out) throws IOException { public Iterator> iterator() { return new CellIterator(); } - + private class CellIterator implements Iterator> { private Iterator> it; + CellIterator() { it = valueMap.entrySet().iterator(); } - + public boolean hasNext() { return it.hasNext(); } - + public Entry next() { return it.next(); } - + public void remove() throws UnsupportedOperationException { throw new UnsupportedOperationException("remove is not supported"); } } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org + * .apache.hadoop.hbase.rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeCell(this); + } } diff --git a/src/java/org/apache/hadoop/hbase/io/RowResult.java b/src/java/org/apache/hadoop/hbase/io/RowResult.java index bbc83e712656..4f650f79c61f 100644 --- a/src/java/org/apache/hadoop/hbase/io/RowResult.java +++ b/src/java/org/apache/hadoop/hbase/io/RowResult.java @@ -32,14 +32,20 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.rest.descriptors.RestCell; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; +import agilejson.TOJSON; + /** * Holds row name and then a map of columns to cells. */ -public class RowResult implements Writable, SortedMap { +public class RowResult implements Writable, SortedMap, ISerializable { private byte [] row = null; private final HbaseMapWritable cells; @@ -63,6 +69,7 @@ public RowResult (final byte [] row, * Get the row for this RowResult * @return the row */ + @TOJSON(base64=true) public byte [] getRow() { return row; } @@ -124,6 +131,22 @@ public void clear() { public Set> entrySet() { return Collections.unmodifiableSet(this.cells.entrySet()); } + + /** + * This method used solely for the REST serialization + * + * @return + */ + @TOJSON + public RestCell[] getCells() { + RestCell[] restCells = new RestCell[this.cells.size()]; + int i = 0; + for (Map.Entry entry : this.cells.entrySet()) { + restCells[i] = new RestCell(entry.getKey(), entry.getValue()); + i++; + } + return restCells; + } public Collection values() { ArrayList result = new ArrayList(); @@ -235,10 +258,17 @@ public String toString() { return sb.toString(); } + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) throws HBaseRestException { + serializer.serializeRowResult(this); + } + // // Writable // - + public void readFields(final DataInput in) throws IOException { this.row = Bytes.readByteArray(in); this.cells.readFields(in); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 966aac44779c..6e48bb5bb770 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -248,4 +248,4 @@ void interruptIfNecessary() { this.interrupt(); } } -} +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/rest/AbstractController.java b/src/java/org/apache/hadoop/hbase/rest/AbstractController.java new file mode 100644 index 000000000000..6bf99e4f8676 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/AbstractController.java @@ -0,0 +1,72 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public abstract class AbstractController implements RESTConstants { + + private Log LOG = LogFactory.getLog(AbstractController.class); + protected Configuration conf; + protected AbstractModel model; + + public void initialize(HBaseConfiguration conf, HBaseAdmin admin) { + this.conf = conf; + this.model = generateModel(conf, admin); + } + + public abstract void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException; + + public abstract void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException; + + public abstract void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException; + + public abstract void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException; + + protected abstract AbstractModel generateModel(HBaseConfiguration conf, + HBaseAdmin a); + + protected byte[][] getColumnsFromQueryMap(Map queryMap) { + byte[][] columns = null; + String[] columnArray = queryMap.get(RESTConstants.COLUMN); + if (columnArray != null) { + columns = new byte[columnArray.length][]; + for (int i = 0; i < columnArray.length; i++) { + columns[i] = Bytes.toBytes(columnArray[i]); + } + } + return columns; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/AbstractModel.java b/src/java/org/apache/hadoop/hbase/rest/AbstractModel.java new file mode 100644 index 000000000000..a03455a3f7d7 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/AbstractModel.java @@ -0,0 +1,99 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.util.Base64; +import org.apache.hadoop.hbase.util.Bytes; + +public abstract class AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(AbstractModel.class); + protected HBaseConfiguration conf; + protected HBaseAdmin admin; + + protected static class Encodings { + + protected interface Encoding { + + String encode(byte[] b) throws HBaseRestException; + } + + public static Encoding EBase64 = new Encoding() { + + public String encode(byte[] b) throws HBaseRestException { + return new String(Base64.encodeBytes(b)); + } + }; + public static Encoding EUTF8 = new Encoding() { + + public String encode(byte[] b) throws HBaseRestException { + return new String(b); + } + }; + } + + protected static Encodings.Encoding encoding = Encodings.EUTF8; + + public void initialize(HBaseConfiguration conf, HBaseAdmin admin) { + this.conf = conf; + this.admin = admin; + } + + protected byte[][] getColumns(byte[] tableName) throws HBaseRestException { + try { + HTable h = new HTable(tableName); + Collection columns = h.getTableDescriptor() + .getFamilies(); + byte[][] resultant = new byte[columns.size()][]; + int count = 0; + + for (HColumnDescriptor c : columns) { + resultant[count++] = c.getNameWithColon(); + } + + return resultant; + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + protected static byte COLON = Bytes.toBytes(":")[0]; + + protected boolean isColumnFamily(byte[] columnName) { + for (int i = 0; i < columnName.length; i++) { + if (columnName[i] == COLON) { + return true; + } + } + + return false; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/DatabaseController.java b/src/java/org/apache/hadoop/hbase/rest/DatabaseController.java new file mode 100644 index 000000000000..d732a9efd68f --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/DatabaseController.java @@ -0,0 +1,84 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; + +public class DatabaseController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(DatabaseController.class); + + protected DatabaseModel getModel() { + return (DatabaseModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, + HBaseAdmin admin) { + return new DatabaseModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + s.setNoQueryResults(); + DatabaseModel innerModel = getModel(); + + if (queryMap.size() == 0) { + s.setOK(innerModel.getDatabaseMetadata()); + } else { + s.setBadRequest("Unknown query."); + } + s.respond(); + } + + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java b/src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java new file mode 100644 index 000000000000..1c7a4e896993 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java @@ -0,0 +1,85 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; + +import agilejson.TOJSON; + +public class DatabaseModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(DatabaseModel.class); + + public DatabaseModel(HBaseConfiguration conf, HBaseAdmin admin) { + super.initialize(conf, admin); + } + + public static class DatabaseMetadata implements ISerializable { + protected boolean master_running; + protected HTableDescriptor[] tables; + + public DatabaseMetadata(HBaseAdmin a) throws IOException { + master_running = a.isMasterRunning(); + tables = a.listTables(); + } + + @TOJSON(prefixLength = 2) + public boolean isMasterRunning() { + return master_running; + } + + @TOJSON + public HTableDescriptor[] getTables() { + return tables; + } + + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeDatabaseMetadata(this); + } + } + + // Serialize admin ourselves to json object + // rather than returning the admin object for obvious reasons + public DatabaseMetadata getMetadata() throws HBaseRestException { + return getDatabaseMetadata(); + } + + protected DatabaseMetadata getDatabaseMetadata() throws HBaseRestException { + DatabaseMetadata databaseMetadata = null; + try { + databaseMetadata = new DatabaseMetadata(this.admin); + } catch (IOException e) { + throw new HBaseRestException(e); + } + + return databaseMetadata; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java b/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java index 291525695019..a9ce7515df9f 100644 --- a/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java +++ b/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java @@ -19,7 +19,12 @@ */ package org.apache.hadoop.hbase.rest; +import java.io.BufferedReader; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; @@ -27,49 +32,98 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.HBaseRestParserFactory; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.rest.serializer.RestSerializerFactory; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.InfoServer; -import org.apache.hadoop.mapred.StatusHttpServer; import org.mortbay.http.NCSARequestLog; import org.mortbay.http.SocketListener; import org.mortbay.jetty.servlet.WebApplicationContext; /** - * Servlet implementation class for hbase REST interface. - * Presumes container ensures single thread through here at any one time - * (Usually the default configuration). In other words, code is not - * written thread-safe. - *

This servlet has explicit dependency on Jetty server; it uses the - * jetty implementation of MultipartResponse. + * Servlet implementation class for hbase REST interface. Presumes container + * ensures single thread through here at any one time (Usually the default + * configuration). In other words, code is not written thread-safe. + * + * This servlet has explicit dependency on Jetty server; it uses the jetty + * implementation of MultipartResponse. * - * TODO: + * + * TODO: * - * multipart/related response is not correct; the servlet setContentType - * is broken. I am unable to add parameters such as boundary or start to - * multipart/related. They get stripped. - * Currently creating a scanner, need to specify a column. Need to make - * it so the HTable instance has current table's metadata to-hand so easy to - * find the list of all column families so can make up list of columns if none + * multipart/related response is not correct; the servlet setContentType is + * broken. I am unable to add parameters such as boundary or start to + * multipart/related. They get stripped. + * Currently creating a scanner, need to specify a column. Need to make it + * so the HTable instance has current table's metadata to-hand so easy to find + * the list of all column families so can make up list of columns if none * specified. * Minor items are we are decoding URLs in places where probably already * done and how to timeout scanners that are in the scanner list. * - * @see
Hbase REST Specification + * + * @see Hbase + * REST Specification */ -@SuppressWarnings("serial") -public class Dispatcher extends javax.servlet.http.HttpServlet -implements javax.servlet.Servlet { - @SuppressWarnings("unused") - private static final Log LOG = LogFactory.getLog(Dispatcher.class.getName()); - private MetaHandler metaHandler; - private TableHandler tableHandler; - private RowHandler rowHandler; - private ScannerHandler scannerHandler; - - private static final String SCANNER = "scanner"; - private static final String ROW = "row"; - +public class Dispatcher extends javax.servlet.http.HttpServlet { + + /** + * + */ + private static final long serialVersionUID = -8075335435797071569L; + private static final Log LOG = LogFactory.getLog(Dispatcher.class); + protected DatabaseController dbController; + protected TableController tableController; + protected RowController rowController; + protected ScannerController scannercontroller; + protected TimestampController tsController; + + public enum ContentType { + XML("text/xml"), JSON("application/json"), PLAIN("text/plain"), MIME( + "multipart/related"), NOT_ACCEPTABLE(""); + + private final String type; + + private ContentType(final String t) { + this.type = t; + } + + @Override + public String toString() { + return this.type; + } + + /** + * Utility method used looking at Accept header content. + * + * @param t + * The content type to examine. + * @return The enum that matches the prefix of t or the default + * enum if t is empty. If unsupported type, we return + * NOT_ACCEPTABLE. + */ + public static ContentType getContentType(final String t) { + // Default to text/plain. Curl sends */*. + if (t == null || t.equals("*/*")) { + return ContentType.XML; + } + String lowerCased = t.toLowerCase(); + ContentType[] values = ContentType.values(); + ContentType result = null; + for (int i = 0; i < values.length; i++) { + if (lowerCased.startsWith(values[i].type)) { + result = values[i]; + break; + } + } + return result == null ? NOT_ACCEPTABLE : result; + } + } + /** * Default constructor */ @@ -80,149 +134,293 @@ public Dispatcher() { @Override public void init() throws ServletException { super.init(); - + HBaseConfiguration conf = new HBaseConfiguration(); HBaseAdmin admin = null; - - try{ + + try { admin = new HBaseAdmin(conf); - metaHandler = new MetaHandler(conf, admin); - tableHandler = new TableHandler(conf, admin); - rowHandler = new RowHandler(conf, admin); - scannerHandler = new ScannerHandler(conf, admin); - } catch(Exception e){ + createControllers(); + + dbController.initialize(conf, admin); + tableController.initialize(conf, admin); + rowController.initialize(conf, admin); + tsController.initialize(conf, admin); + scannercontroller.initialize(conf, admin); + + LOG.debug("no errors in init."); + } catch (Exception e) { + System.out.println(e.toString()); throw new ServletException(e); } } + protected void createControllers() { + dbController = new DatabaseController(); + tableController = new TableController(); + rowController = new RowController(); + tsController = new TimestampController(); + scannercontroller = new ScannerController(); + } + + @SuppressWarnings("unchecked") @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException, ServletException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 0 || pathSegments[0].length() <= 0) { - // if it was a root request, then get some metadata about - // the entire instance. 
- metaHandler.doGet(request, response, pathSegments); - } else { - if (pathSegments.length >= 2 && pathSegments[0].length() > 0 && pathSegments[1].toLowerCase().equals(ROW)) { - // if it has table name and row path segments - rowHandler.doGet(request, response, pathSegments); + throws IOException, ServletException { + try { + Status s = this.createStatus(request, response); + byte[][] pathSegments = getPathSegments(request); + Map queryMap = request.getParameterMap(); + + if (pathSegments.length == 0 || pathSegments[0].length <= 0) { + // if it was a root request, then get some metadata about + // the entire instance. + dbController.get(s, pathSegments, queryMap); } else { - // otherwise, it must be a GET request suitable for the - // table handler. - tableHandler.doGet(request, response, pathSegments); + if (pathSegments.length >= 2 + && pathSegments.length <= 3 + && pathSegments[0].length > 0 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW)) { + // if it has table name and row path segments + rowController.get(s, pathSegments, queryMap); + } else if (pathSegments.length == 4 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW)) { + tsController.get(s, pathSegments, queryMap); + } else { + // otherwise, it must be a GET request suitable for the + // table handler. + tableController.get(s, pathSegments, queryMap); + } + } + LOG.debug("GET - No Error"); + } catch (HBaseRestException e) { + LOG.debug("GET - Error: " + e.toString()); + try { + Status sError = createStatus(request, response); + sError.setInternalError(e); + sError.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } } } + @SuppressWarnings("unchecked") @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) - throws IOException, ServletException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 0 || pathSegments[0].length() <= 0) { - // if it was a root request, it must be a create table request - tableHandler.doPost(request, response, pathSegments); - return; - } else { - // there should be at least two path segments (table name and row or - // scanner or disable/enable operation) - if (pathSegments.length >= 2 && pathSegments[0].length() > 0) { - if (pathSegments[1].toLowerCase().equals(SCANNER) - && pathSegments.length >= 2) { - scannerHandler.doPost(request, response, pathSegments); - return; - } else if (pathSegments[1].toLowerCase().equals(ROW) - && pathSegments.length >= 3) { - rowHandler.doPost(request, response, pathSegments); - return; - } else if ((pathSegments[1].toLowerCase().equals(TableHandler.DISABLE) || pathSegments[1].toLowerCase().equals(TableHandler.ENABLE)) - && pathSegments.length == 2) { - tableHandler.doPost(request, response, pathSegments); - return; + throws IOException, ServletException { + try { + + Status s = createStatus(request, response); + byte[][] pathSegments = getPathSegments(request); + Map queryMap = request.getParameterMap(); + byte[] input = readInputBuffer(request); + IHBaseRestParser parser = this.getParser(request); + + if ((pathSegments.length >= 0 && pathSegments.length <= 1) + || Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ENABLE) + || Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.DISABLE)) { + // this is a table request + tableController.post(s, pathSegments, queryMap, input, parser); + } else { + // there should be at least two path segments 
(table name and row or + // scanner) + if (pathSegments.length >= 2 && pathSegments[0].length > 0) { + if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { + scannercontroller.post(s, pathSegments, queryMap, input, parser); + return; + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW) + && pathSegments.length >= 3) { + rowController.post(s, pathSegments, queryMap, input, parser); + return; + } } } + } catch (HBaseRestException e) { + LOG.debug("POST - Error: " + e.toString()); + try { + Status s_error = createStatus(request, response); + s_error.setInternalError(e); + s_error.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + } } - - // if we get to this point, then no handler was matched this request. - GenericHandler.doNotFound(response, "No handler for " + request.getPathInfo()); } - + @SuppressWarnings("unchecked") @Override protected void doPut(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 1 && pathSegments[0].length() > 0) { - // if it has only table name - tableHandler.doPut(request, response, pathSegments); - } else { - // Equate PUT with a POST. - doPost(request, response); + throws ServletException, IOException { + try { + byte[][] pathSegments = getPathSegments(request); + if(pathSegments.length == 0) { + throw new HBaseRestException("method not supported"); + } else if (pathSegments.length == 1 && pathSegments[0].length > 0) { + // if it has only table name + Status s = createStatus(request, response); + Map queryMap = request.getParameterMap(); + IHBaseRestParser parser = this.getParser(request); + byte[] input = readInputBuffer(request); + tableController.put(s, pathSegments, queryMap, input, parser); + } else { + // Equate PUT with a POST. 
+ doPost(request, response); + } + } catch (HBaseRestException e) { + try { + Status s_error = createStatus(request, response); + s_error.setInternalError(e); + s_error.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + } } } + @SuppressWarnings("unchecked") @Override protected void doDelete(HttpServletRequest request, - HttpServletResponse response) - throws IOException, ServletException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 1 && pathSegments[0].length() > 0) { - // if it only has only table name - tableHandler.doDelete(request, response, pathSegments); - return; - } else if (pathSegments.length >= 3 && pathSegments[0].length() > 0) { - // must be at least two path segments (table name and row or scanner) - if (pathSegments[1].toLowerCase().equals(SCANNER) && - pathSegments.length == 3 && pathSegments[2].length() > 0) { - // DELETE to a scanner requires at least three path segments - scannerHandler.doDelete(request, response, pathSegments); - return; - } else if (pathSegments[1].toLowerCase().equals(ROW) && - pathSegments.length >= 3) { - rowHandler.doDelete(request, response, pathSegments); + HttpServletResponse response) throws IOException, ServletException { + try { + Status s = createStatus(request, response); + byte[][] pathSegments = getPathSegments(request); + Map queryMap = request.getParameterMap(); + + if(pathSegments.length == 0) { + throw new HBaseRestException("method not supported"); + } else if (pathSegments.length == 1 && pathSegments[0].length > 0) { + // if it only has only table name + tableController.delete(s, pathSegments, queryMap); return; + } else if (pathSegments.length >= 3 && pathSegments[0].length > 0) { + // must be at least two path segments (table name and row or scanner) + if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER) + && pathSegments.length == 3 && pathSegments[2].length > 0) { + // DELETE to a scanner requires at least three path segments + scannercontroller.delete(s, pathSegments, queryMap); + return; + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW) + && pathSegments.length >= 3) { + rowController.delete(s, pathSegments, queryMap); + return; + } else if (pathSegments.length == 4) { + tsController.delete(s, pathSegments, queryMap); + } + } + } catch (HBaseRestException e) { + LOG.debug("POST - Error: " + e.toString()); + try { + Status s_error = createStatus(request, response); + s_error.setInternalError(e); + s_error.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } } - - // if we reach this point, then no handler exists for this request. - GenericHandler.doNotFound(response, "No handler"); } - - /* + + /** + * This method will get the path segments from the HttpServletRequest. Please + * note that if the first segment of the path is /api this is removed from the + * returning byte array. + * * @param request + * * @return request pathinfo split on the '/' ignoring the first '/' so first * element in pathSegment is not the empty string. 
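+ * For example, a request URI of /api/tablename/row/rowkey (or /tablename/row/rowkey) is split into the segments {tablename, row, rowkey}.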
*/ - private String [] getPathSegments(final HttpServletRequest request) { + protected byte[][] getPathSegments(final HttpServletRequest request) { int context_len = request.getContextPath().length() + 1; - return request.getRequestURI().substring(context_len).split("/"); + + byte[][] pathSegments = Bytes.toByteArrays(request.getRequestURI().substring(context_len) + .split("/")); + byte[] apiAsBytes = "api".getBytes(); + if (Arrays.equals(apiAsBytes, pathSegments[0])) { + byte[][] newPathSegments = new byte[pathSegments.length - 1][]; + for(int i = 0; i < newPathSegments.length; i++) { + newPathSegments[i] = pathSegments[i + 1]; + } + pathSegments = newPathSegments; + } + return pathSegments; + } + + protected byte[] readInputBuffer(HttpServletRequest request) + throws HBaseRestException { + try { + String resultant = ""; + BufferedReader r = request.getReader(); + + int maxLength = 5000; // tie to conf + int bufferLength = 640; + + char[] c = new char[bufferLength]; // 40 characters * sizeof(UTF16) + // TODO make s maxLength and c size values in configuration + if (!r.ready()) { + Thread.sleep(1000); // If r is not ready wait 1 second + if (!r.ready()) { // If r still is not ready something is wrong, return + // blank. + return new byte[0]; + } + } + while (r.ready()) { + int n = r.read(c, 0, bufferLength); + resultant += new String(c); + if (n != bufferLength) { + break; + } else if (resultant.length() > maxLength) { + resultant = resultant.substring(0, maxLength); + break; + } + } + return Bytes.toBytes(resultant.trim()); + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + protected IHBaseRestParser getParser(HttpServletRequest request) + throws HBaseRestException { + return HBaseRestParserFactory.getParser(ContentType.getContentType(request + .getHeader("content-type"))); + } + + protected Status createStatus(HttpServletRequest request, + HttpServletResponse response) throws HBaseRestException { + return new Status(response, RestSerializerFactory.getSerializer(request, + response), this.getPathSegments(request)); } // // Main program and support routines // - - private static void printUsageAndExit() { + protected static void printUsageAndExit() { printUsageAndExit(null); } - - private static void printUsageAndExit(final String message) { + + protected static void printUsageAndExit(final String message) { if (message != null) { System.err.println(message); } - System.out.println("Usage: java org.apache.hadoop.hbase.rest.Dispatcher " + - "--help | [--port=PORT] [--bind=ADDR] start"); + System.out.println("Usage: java org.apache.hadoop.hbase.rest.Dispatcher " + + "--help | [--port=PORT] [--bind=ADDR] start"); System.out.println("Arguments:"); System.out.println(" start Start REST server"); System.out.println(" stop Stop REST server"); System.out.println("Options:"); System.out.println(" port Port to listen on. Default: 60050."); System.out.println(" bind Address to bind on. Default: 0.0.0.0."); - System.out.println(" max-num-threads The maximum number of threads for Jetty to run. Defaults to 256."); + System.out + .println(" max-num-threads The maximum number of threads for Jetty to run. Defaults to 256."); System.out.println(" help Print this message and exit."); System.exit(0); @@ -230,9 +428,10 @@ private static void printUsageAndExit(final String message) { /* * Start up the REST servlet in standalone mode. 
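+ * For example: java org.apache.hadoop.hbase.rest.Dispatcher --port=60050 start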
+ * * @param args */ - protected static void doMain(final String [] args) throws Exception { + protected static void doMain(final String[] args) throws Exception { if (args.length < 1) { printUsageAndExit(); } @@ -246,7 +445,7 @@ protected static void doMain(final String [] args) throws Exception { final String addressArgKey = "--bind="; final String portArgKey = "--port="; final String numThreadsKey = "--max-num-threads="; - for (String cmd: args) { + for (String cmd : args) { if (cmd.startsWith(addressArgKey)) { bindAddress = cmd.substring(addressArgKey.length()); continue; @@ -258,14 +457,14 @@ protected static void doMain(final String [] args) throws Exception { } else if (cmd.equals("start")) { continue; } else if (cmd.equals("stop")) { - printUsageAndExit("To shutdown the REST server run " + - "bin/hbase-daemon.sh stop rest or send a kill signal to " + - "the REST server pid"); - } else if (cmd.startsWith(numThreadsKey)) { + printUsageAndExit("To shutdown the REST server run " + + "bin/hbase-daemon.sh stop rest or send a kill signal to " + + "the REST server pid"); + } else if (cmd.startsWith(numThreadsKey)) { numThreads = Integer.parseInt(cmd.substring(numThreadsKey.length())); continue; } - + // Print out usage if we get to here. printUsageAndExit(); } @@ -278,20 +477,17 @@ protected static void doMain(final String [] args) throws Exception { NCSARequestLog ncsa = new NCSARequestLog(); ncsa.setLogLatency(true); webServer.setRequestLog(ncsa); - WebApplicationContext context = - webServer.addWebApplication("/api", InfoServer.getWebAppDir("rest")); - context.addServlet("stacks", "/stacks", - StatusHttpServer.StackServlet.class.getName()); - context.addServlet("logLevel", "/logLevel", - org.apache.hadoop.log.LogLevel.Servlet.class.getName()); + WebApplicationContext context = webServer.addWebApplication("/", InfoServer + .getWebAppDir("rest")); webServer.start(); } - + /** * @param args - * @throws Exception + * @throws Exception */ - public static void main(String [] args) throws Exception { + public static void main(String[] args) throws Exception { + System.out.println("Starting restServer"); doMain(args); } } diff --git a/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java b/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java index 2266aaaeb3cf..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java @@ -1,342 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.io.PrintWriter; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.util.Map; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.znerd.xmlenc.LineBreak; -import org.znerd.xmlenc.XMLOutputter; - -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * GenericHandler contains some basic common stuff that all the individual - * REST handler types take advantage of. - */ -public abstract class GenericHandler { - protected HBaseConfiguration conf; - protected HBaseAdmin admin; - - protected static final String ACCEPT = "accept"; - protected static final String COLUMN = "column"; - protected static final String TIMESTAMP = "timestamp"; - protected static final String START_ROW = "start_row"; - protected static final String END_ROW = "end_row"; - protected static final String CONTENT_TYPE = "content-type"; - protected static final String ROW = "row"; - protected static final String REGIONS = "regions"; - protected static final String VERSION = "version"; - protected static final String OFFSET = "offset"; - protected static final String LIMIT = "limit"; - - protected final Log LOG = LogFactory.getLog(this.getClass()); - - public GenericHandler(HBaseConfiguration conf, HBaseAdmin admin) { - this.conf = conf; - this.admin = admin; - } - - /* - * Supported content types as enums - */ - protected enum ContentType { - XML("text/xml"), - PLAIN("text/plain"), - MIME("multipart/related"), - NOT_ACCEPTABLE(""); - - private final String type; - - private ContentType(final String t) { - this.type = t; - } - - @Override - public String toString() { - return this.type; - } - - /** - * Utility method used looking at Accept header content. - * @param t The content type to examine. - * @return The enum that matches the prefix of t or - * the default enum if t is empty. If unsupported type, we - * return NOT_ACCEPTABLE. - */ - public static ContentType getContentType(final String t) { - // Default to text/plain. Curl sends */*. - if (t == null || t.equals("*/*")) { - return ContentType.XML; - } - String lowerCased = t.toLowerCase(); - ContentType [] values = ContentType.values(); - ContentType result = null; - for (int i = 0; i < values.length; i++) { - if (lowerCased.startsWith(values[i].type)) { - result = values[i]; - break; - } - } - return result == null? NOT_ACCEPTABLE: result; - } - } - - - /* - * @param o - * @return XMLOutputter wrapped around o. - * @throws IllegalStateException - * @throws IOException - */ - protected XMLOutputter getXMLOutputter(final PrintWriter o) - throws IllegalStateException, IOException { - XMLOutputter outputter = new XMLOutputter(o, HConstants.UTF8_ENCODING); - outputter.setLineBreak(LineBreak.UNIX); - outputter.setIndentation(" "); - outputter.declaration(); - return outputter; - } - - /* - * Write an XML element. 
- * @param outputter - * @param name - * @param value - * @throws IllegalStateException - * @throws IOException - */ - protected void doElement(final XMLOutputter outputter, - final String name, final String value) - throws IllegalStateException, IOException { - outputter.startTag(name); - if (value.length() > 0) { - outputter.pcdata(value); - } - outputter.endTag(); - } - - /* - * Set content-type, encoding, and status on passed response - * @param response - * @param status - * @param contentType - */ - public static void setResponseHeader(final HttpServletResponse response, - final int status, final String contentType) { - // Container adds the charset to the HTTP content-type header. - response.setContentType(contentType); - response.setCharacterEncoding(HConstants.UTF8_ENCODING); - response.setStatus(status); - } - - /* - * If we can't do the specified Accepts header type. - * @param response - * @throws IOException - */ - public static void doNotAcceptable(final HttpServletResponse response) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_ACCEPTABLE); - } - - /* - * If we can't do the specified Accepts header type. - * @param response - * @param message - * @throws IOException - */ - public static void doNotAcceptable(final HttpServletResponse response, - final String message) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_ACCEPTABLE, message); - } - - /* - * Resource not found. - * @param response - * @throws IOException - */ - public static void doNotFound(final HttpServletResponse response) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_FOUND); - } - - /* - * Resource not found. - * @param response - * @param msg - * @throws IOException - */ - public static void doNotFound(final HttpServletResponse response, final String msg) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_FOUND, msg); - } - - /* - * Unimplemented method. - * @param response - * @param message to send - * @throws IOException - */ - public static void doMethodNotAllowed(final HttpServletResponse response, - final String message) - throws IOException { - response.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message); - } - - protected String getTableName(final String [] pathSegments) - throws UnsupportedEncodingException { - // Get table name? First part of passed segment. It can't be empty string - // or null because we should have tested for that before coming in here. 
- return URLDecoder.decode(pathSegments[0], HConstants.UTF8_ENCODING); - } - - /* - * Output row columns - * @param outputter - * @param m - * @throws IllegalStateException - * @throws IllegalArgumentException - * @throws IOException - */ - protected void outputColumnsXml(final XMLOutputter outputter, final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - outputColumnsXml(null, outputter, m); - } - - protected void outputColumnsXml(final HttpServletRequest request, - final XMLOutputter outputter, final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - int offset = 0, limit = Integer.MAX_VALUE; - if (request != null) { - String offset_string = request.getParameter(OFFSET); - if (offset_string != null && !offset_string.equals("")) - offset = Integer.parseInt(offset_string); - String limit_string = request.getParameter(LIMIT); - if (limit_string != null && !limit_string.equals("")) { - limit = Integer.parseInt(limit_string); - } - } - - for (Map.Entry e: m.entrySet()) { - if (offset > 0) { - --offset; - continue; - } - if (limit < 1) { - break; - } else { - --limit; - } - outputter.startTag(COLUMN); - doElement(outputter, "name", - org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey())); - outputCellXml(outputter, e.getValue()); - outputter.endTag(); - } - } - - protected void outputColumnsWithMultiVersionsXml(final XMLOutputter outputter, - final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - outputColumnsWithMultiVersionsXml(null, outputter, m); - } - - protected void outputColumnsWithMultiVersionsXml(final HttpServletRequest request, - final XMLOutputter outputter, final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - int offset = 0, limit = Integer.MAX_VALUE; - if (request != null) { - String offset_string = request.getParameter(OFFSET); - if (offset_string != null && !offset_string.equals("")) - offset = Integer.parseInt(offset_string); - String limit_string = request.getParameter(LIMIT); - if (limit_string != null && !limit_string.equals("")) { - limit = Integer.parseInt(limit_string); - } - } - - for (Map.Entry e: m.entrySet()) { - if (offset > 0) { - --offset; - continue; - } - if (limit < 1) { - break; - } else { - --limit; - } - for (Cell c : e.getValue()) { - outputter.startTag(COLUMN); - doElement(outputter, "name", - org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey())); - outputCellXml(outputter, c); - outputter.endTag(); - } - } - } - - protected void outputCellXml(final XMLOutputter outputter, Cell c) - throws IllegalStateException, IllegalArgumentException, IOException { - // We don't know String from binary data so we always base64 encode. - doElement(outputter, "value", - org.apache.hadoop.hbase.util.Base64.encodeBytes(c.getValue())); - doElement(outputter, "timestamp", String.valueOf(c.getTimestamp())); - } -// Commented - multipart support is currently nonexistant. -// protected void outputColumnsMime(final MultiPartResponse mpr, -// final Map m) -// throws IOException { -// for (Map.Entry e: m.entrySet()) { -// mpr.startPart("application/octet-stream", -// new String [] {"Content-Description: " + e.getKey().toString(), -// "Content-Transfer-Encoding: binary", -// "Content-Length: " + e.getValue().getValue().length}); -// mpr.getOut().write(e.getValue().getValue()); -// } -// } - - /* - * Get an HTable instance by it's table name. 
- */ - protected HTable getTable(final String tableName) throws IOException { - return new HTable(this.conf, Bytes.toBytes(tableName)); - } - - protected String makeColumnName(String column) { - if (column.indexOf(':') == -1) - column += ':'; - return column; - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java b/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java index 64e288b22893..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java @@ -1,108 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.util.Bytes; -import org.znerd.xmlenc.XMLOutputter; - - -/** - * MetaHandler fields all requests for metadata at the instance level. At the - * moment this is only GET requests to /. - */ -public class MetaHandler extends GenericHandler { - - public MetaHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException{ - super(conf, admin); - } - - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - getTables(request, response); - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "POST not allowed at /"); - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "PUT not allowed at /"); - } - - public void doDelete(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "DELETE not allowed at /"); - } - - /* - * Return list of tables. - * @param request - * @param response - */ - private void getTables(final HttpServletRequest request, - final HttpServletResponse response) - throws IOException { - HTableDescriptor [] tables = this.admin.listTables(); - - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - setResponseHeader(response, tables.length > 0? 
200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag("tables"); - for (int i = 0; i < tables.length; i++) { - doElement(outputter, "table", Bytes.toString(tables[i].getName())); - } - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - break; - case PLAIN: - setResponseHeader(response, tables.length > 0? 200: 204, - ContentType.PLAIN.toString()); - PrintWriter out = response.getWriter(); - for (int i = 0; i < tables.length; i++) { - out.println(Bytes.toString(tables[i].getName())); - } - out.close(); - break; - default: - doNotAcceptable(response); - } - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/RESTConstants.java b/src/java/org/apache/hadoop/hbase/rest/RESTConstants.java new file mode 100644 index 000000000000..7a79383ccb07 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/RESTConstants.java @@ -0,0 +1,111 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.rest.filter.RowFilterSetFactory; +import org.apache.hadoop.hbase.rest.filter.StopRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.WhileMatchRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.PageRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.ColumnValueFilterFactory; +import org.apache.hadoop.hbase.rest.filter.RegExpRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.InclusiveStopRowFilterFactory; +import java.util.HashMap; +import org.apache.hadoop.hbase.rest.filter.FilterFactory; + +public interface RESTConstants { + final static String TRUE = "true"; + final static String FALSE = "false"; + // Used for getting all data from a column specified in that order. + final static String COLUMNS = "columns"; + final static String COLUMN = "column"; + // Used with TableExists + final static String EXISTS = "exists"; + // Maps to Transaction ID + final static String TRANSACTION = "transaction"; + // Transaction Operation Key. + final static String TRANSACTION_OPERATION = "transaction_op"; + // Transaction Operation Values + final static String TRANSACTION_OPERATION_COMMIT = "commit"; + final static String TRANSACTION_OPERATION_CREATE = "create"; + final static String TRANSACTION_OPERATION_ABORT = "abort"; + // Filter Key + final static String FILTER = "filter"; + final static String FILTER_TYPE = "type"; + final static String FILTER_VALUE = "value"; + final static String FILTER_RANK = "rank"; + // Scanner Key + final static String SCANNER = "scanner"; + // The amount of rows to return at one time. 
+ final static String SCANNER_RESULT_SIZE = "result_size"; + final static String SCANNER_START_ROW = "start_row"; + final static String SCANNER_STOP_ROW = "stop_row"; + final static String SCANNER_FILTER = "filter"; + final static String SCANNER_TIMESTAMP = "timestamp"; + final static String NUM_VERSIONS = "num_versions"; + final static String SCANNER_COLUMN = "column"; + // static items used on the path + static final String DISABLE = "disable"; + static final String ENABLE = "enable"; + static final String REGIONS = "regions"; + static final String ROW = "row"; + static final String TIME_STAMPS = "timestamps"; + static final String METADATA = "metadata"; + + static final String NAME = "name"; + static final String VALUE = "value"; + static final String ROWS = "rows"; + + static final FactoryMap filterFactories = FactoryMap.getFactoryMap(); + static final String LIMIT = "limit"; + + static class FactoryMap { + + protected static boolean created = false; + protected HashMap map = new HashMap(); + + protected FactoryMap() { + } + + public static FactoryMap getFactoryMap() { + if (!created) { + created = true; + FactoryMap f = new FactoryMap(); + f.initialize(); + return f; + } else { + return null; + } + } + + public FilterFactory get(String c) { + return map.get(c); + } + + protected void initialize() { + map.put("ColumnValueFilter", new ColumnValueFilterFactory()); + map.put("InclusiveStopRowFilter", new InclusiveStopRowFilterFactory()); + map.put("PageRowFilter", new PageRowFilterFactory()); + map.put("RegExpRowFilter", new RegExpRowFilterFactory()); + map.put("RowFilterSet", new RowFilterSetFactory()); + map.put("StopRowFilter", new StopRowFilterFactory()); + map.put("WhileMatchRowFilter", new WhileMatchRowFilterFactory()); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/RowController.java b/src/java/org/apache/hadoop/hbase/rest/RowController.java new file mode 100644 index 000000000000..d95be68fce20 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/RowController.java @@ -0,0 +1,135 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
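For illustration only (not part of this patch): RESTConstants exposes its filter factories through the static filterFactories registry; note that FactoryMap.getFactoryMap() hands out a populated map only on its first call and returns null afterwards, so that static field is effectively the single shared registry. The sketch below shows the same lookup that ScannerController.unionFilters() performs further down in this patch; the JSON literal is only a placeholder, since the accepted filter grammar is defined by the individual filter factories, which are not shown here.

  // Illustrative sketch, assuming the registry was initialized as above.
  FilterFactory factory = RESTConstants.filterFactories.get("StopRowFilter");
  RowFilterInterface filter =
      factory.getFilterFromJSON("{ \"value\" : \"lastRowToInclude\" }"); // placeholder JSON
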
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public class RowController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(RowController.class); + + protected RowModel getModel() { + return (RowModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) { + return new RowModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + RowModel innerModel = getModel(); + s.setNoQueryResults(); + + byte[] tableName; + byte[] rowName; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + RowResult row = null; + + if (queryMap.size() == 0 && pathSegments.length <= 3) { + row = innerModel.get(tableName, rowName); + } else if (pathSegments.length == 4 + && Bytes.toString(pathSegments[3]).toLowerCase().equals( + RESTConstants.TIME_STAMPS)) { + innerModel.getTimestamps(tableName, rowName); + } else { + row = innerModel.get(tableName, rowName, this.getColumnsFromQueryMap(queryMap)); + } + if(row == null) { + throw new HBaseRestException("row not found"); + } + s.setOK(row); + s.respond(); + } + + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + RowModel innerModel = getModel(); + + BatchUpdate b; + RowUpdateDescriptor rud = parser + .getRowUpdateDescriptor(input, pathSegments); + + if (input.length == 0) { + s.setUnsupportedMediaType("no data send with post request"); + s.respond(); + return; + } + + b = new BatchUpdate(rud.getRowName()); + + for (byte[] key : rud.getColVals().keySet()) { + b.put(key, rud.getColVals().get(key)); + } + + try { + innerModel.post(rud.getTableName().getBytes(), b); + s.setOK(); + } catch (HBaseRestException e) { + s.setUnsupportedMediaType(e.getMessage()); + } + s.respond(); + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + RowModel innerModel = getModel(); + byte[] tableName; + byte[] rowName; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + if(queryMap.size() == 0) { + innerModel.delete(tableName, rowName); + } else { + innerModel.delete(tableName, rowName, this.getColumnsFromQueryMap(queryMap)); + } + s.setOK(); + s.respond(); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/RowHandler.java b/src/java/org/apache/hadoop/hbase/rest/RowHandler.java index 9561fac1340f..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/RowHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/RowHandler.java @@ -1,346 +0,0 @@ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.net.URLDecoder; -import 
java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.io.BatchUpdate; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.util.Bytes; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.znerd.xmlenc.XMLOutputter; - -public class RowHandler extends GenericHandler { - - public RowHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException { - super(conf, admin); - } - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) throws ServletException, IOException { - HTable table = getTable(pathSegments[0]); - if (pathSegments[1].toLowerCase().equals(ROW)) { - // get a row - getRow(table, request, response, pathSegments); - } else { - doNotFound(response, "Not handled in RowHandler"); - } - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) throws ServletException, IOException { - putRow(request, response, pathSegments); - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) throws ServletException, IOException { - doPost(request, response, pathSegments); - } - - public void doDelete(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws ServletException, IOException { - deleteRow(request, response, pathSegments); - } - - /* - * @param request - * @param response - * @param pathSegments info path split on the '/' character. First segment - * is the tablename, second is 'row', and third is the row id. - * @throws IOException - * Retrieve a row in one of several output formats. - */ - private void getRow(HTable table, final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException { - // pull the row key out of the path - byte[] row = Bytes.toBytes(URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING)); - - String timestampStr = null; - if (pathSegments.length == 4) { - // A timestamp has been supplied. - timestampStr = pathSegments[3]; - if (timestampStr.equals("timestamps")) { - // Not supported in hbase just yet. 
TODO - doMethodNotAllowed(response, "Not yet supported by hbase"); - return; - } - } - - String[] column_params = request.getParameterValues(COLUMN); - - byte[][] columns = null; - - if (column_params != null && column_params.length > 0) { - List available_columns = new ArrayList(); - for (String column_param : column_params) { - if (column_param.length() > 0 && table.getTableDescriptor().hasFamily(Bytes.toBytes(column_param))) { - available_columns.add(column_param); - } - } - columns = Bytes.toByteArrays(available_columns.toArray(new String[0])); - } - - String[] version_params = request.getParameterValues(VERSION); - int version = 0; - if (version_params != null && version_params.length == 1) { - version = Integer.parseInt(version_params[0]); - } - - if (version > 0 && columns != null) { - Map result = new TreeMap(Bytes.BYTES_COMPARATOR); - - for (byte[] col : columns) { - Cell[] cells = timestampStr == null ? table.get(row, col, version) - : table.get(row, col, Long.parseLong(timestampStr), version); - if (cells != null) { - result.put(col, cells); - } - } - - if (result == null || result.size() == 0) { - doNotFound(response, "Row not found!"); - } else { - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - outputRowWithMultiVersionsXml(request, response, result); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " - + request.getHeader(CONTENT_TYPE)); - } - } - } else { - Map result = timestampStr == null ? table.getRow(row, columns) : table.getRow(row, columns, Long.parseLong(timestampStr)); - if (result == null || result.size() == 0) { - doNotFound(response, "Row not found!"); - } else { - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - outputRowXml(request, response, result); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " - + request.getHeader(CONTENT_TYPE)); - } - } - } - } - - /* - * Output a row encoded as XML. - * @param response - * @param result - * @throws IOException - */ - private void outputRowXml(final HttpServletRequest request, - final HttpServletResponse response, final Map result) - throws IOException { - setResponseHeader(response, result.size() > 0? 200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag(ROW); - doElement(outputter, "count", String.valueOf(result.size())); - outputColumnsXml(request, outputter, result); - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - } - - private void outputRowWithMultiVersionsXml(final HttpServletRequest request, - final HttpServletResponse response, final Map result) - throws IOException { - setResponseHeader(response, result.size() > 0? 200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag(ROW); - doElement(outputter, "count", String.valueOf(result.size())); - outputColumnsWithMultiVersionsXml(request, outputter, result); - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - } - /* - * @param response - * @param result - * Output the results contained in result as a multipart/related response. - */ - // private void outputRowMime(final HttpServletResponse response, - // final Map result) - // throws IOException { - // response.setStatus(result.size() > 0? 200: 204); - // // This code ties me to the jetty server. 
- // MultiPartResponse mpr = new MultiPartResponse(response); - // // Content type should look like this for multipart: - // // Content-type: multipart/related;start="";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml" - // String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" + - // mpr.getBoundary() + "\""; - // // Setting content type is broken. I'm unable to set parameters on the - // // content-type; They get stripped. Can't set boundary, etc. - // // response.addHeader("Content-Type", ct); - // response.setContentType(ct); - // outputColumnsMime(mpr, result); - // mpr.close(); - // } - - /* - * @param request - * @param response - * @param pathSegments - * Do a put based on the client request. - */ - private void putRow(final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException, ServletException { - HTable table = getTable(pathSegments[0]); - - // pull the row key out of the path - String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING); - - switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) { - case XML: - putRowXml(table, row, request, response, pathSegments); - break; - case MIME: - doNotAcceptable(response, "Don't support multipart/related yet..."); - break; - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - - /* - * @param request - * @param response - * @param pathSegments - * Decode supplied XML and do a put to Hbase. - */ - private void putRowXml(HTable table, String row, - final HttpServletRequest request, final HttpServletResponse response, - final String [] pathSegments) - throws IOException, ServletException{ - - DocumentBuilderFactory docBuilderFactory - = DocumentBuilderFactory.newInstance(); - //ignore all comments inside the xml file - docBuilderFactory.setIgnoringComments(true); - - DocumentBuilder builder = null; - Document doc = null; - - String timestamp = pathSegments.length >= 4 ? pathSegments[3] : null; - - try{ - builder = docBuilderFactory.newDocumentBuilder(); - doc = builder.parse(request.getInputStream()); - } catch (javax.xml.parsers.ParserConfigurationException e) { - throw new ServletException(e); - } catch (org.xml.sax.SAXException e){ - throw new ServletException(e); - } - - BatchUpdate batchUpdate; - - try{ - // start an update - batchUpdate = timestamp == null ? - new BatchUpdate(row) : new BatchUpdate(row, Long.parseLong(timestamp)); - - // set the columns from the xml - NodeList columns = doc.getElementsByTagName("column"); - - for(int i = 0; i < columns.getLength(); i++){ - // get the current column element we're working on - Element column = (Element)columns.item(i); - - // extract the name and value children - Node name_node = column.getElementsByTagName("name").item(0); - String name = name_node.getFirstChild().getNodeValue(); - - Node value_node = column.getElementsByTagName("value").item(0); - - byte[] value = new byte[0]; - - // for some reason there's no value here. probably indicates that - // the consumer passed a null as the cell value. 
- if(value_node.getFirstChild() != null && - value_node.getFirstChild().getNodeValue() != null){ - // decode the base64'd value - value = org.apache.hadoop.hbase.util.Base64.decode( - value_node.getFirstChild().getNodeValue()); - } - - // put the value - batchUpdate.put(name, value); - } - - // commit the update - table.commit(batchUpdate); - - // respond with a 200 - response.setStatus(200); - } - catch(Exception e){ - throw new ServletException(e); - } - } - - /* - * @param request - * @param response - * @param pathSegments - * Delete some or all cells for a row. - */ - private void deleteRow(final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException, ServletException { - // grab the table we're operating on - HTable table = getTable(getTableName(pathSegments)); - - // pull the row key out of the path - String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING); - - String[] columns = request.getParameterValues(COLUMN); - - // hack - we'll actually test for the presence of the timestamp parameter - // eventually - boolean timestamp_present = false; - if(timestamp_present){ // do a timestamp-aware delete - doMethodNotAllowed(response, "DELETE with a timestamp not implemented!"); - } - else{ // ignore timestamps - if(columns == null || columns.length == 0){ - // retrieve all the columns - doMethodNotAllowed(response, - "DELETE without specified columns not implemented!"); - } else{ - // delete each column in turn - for(int i = 0; i < columns.length; i++){ - table.deleteAll(row, columns[i]); - } - } - response.setStatus(202); - } - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/RowModel.java b/src/java/org/apache/hadoop/hbase/rest/RowModel.java new file mode 100644 index 000000000000..1b8ce8c4f805 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/RowModel.java @@ -0,0 +1,140 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
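The new RowController shown earlier in this patch keeps the URI shape the removed RowHandler served: first path segment is the table, second is the literal "row", third is the row key, with columns narrowed through the query string. A minimal client-side sketch follows; it is illustrative only, and the host, port, column parameter and Accept header are assumptions, not taken from this patch.

  import java.net.HttpURLConnection;
  import java.net.URL;

  public class RowGetSketch {
    public static void main(String[] args) throws Exception {
      // Path shape expected by RowController.get(): /<table>/row/<rowkey>
      URL url = new URL("http://localhost:60050/mytable/row/row1?column=info:");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestProperty("Accept", "application/json");
      System.out.println("HTTP " + conn.getResponseCode());
      conn.disconnect();
    }
  }
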
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +public class RowModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(RowModel.class); + + public RowModel(HBaseConfiguration conf, HBaseAdmin admin) { + super.initialize(conf, admin); + } + + public RowResult get(byte[] tableName, byte[] rowName) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, columns); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, columns, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, long timestamp) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public TimestampsDescriptor getTimestamps( + @SuppressWarnings("unused") byte[] tableName, + @SuppressWarnings("unused") byte[] rowName) throws HBaseRestException { + // try { + // TimestampsDescriptor tsd = new TimestampsDescriptor(); + // HTable table = new HTable(tableName); + // RowResult row = table.getRow(rowName); + + throw new HBaseRestException("operation currently unsupported"); + + // } catch (IOException e) { + // throw new HBaseRestException("Error finding timestamps for row: " + // + Bytes.toString(rowName), e); + // } + + } + + public void post(byte[] tableName, BatchUpdate b) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.commit(b); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void post(byte[] tableName, List b) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.commit(b); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void delete(byte[] tableName, byte[] rowName) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.deleteAll(rowName); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void delete(byte[] tableName, byte[] rowName, byte[][] columns) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + for (byte[] column : columns) { + table.deleteAll(rowName, column); + } + } catch (IOException e) { + throw new HBaseRestException(e); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerController.java b/src/java/org/apache/hadoop/hbase/rest/ScannerController.java new file mode 100644 
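The RowModel added above is a thin wrapper that opens an HTable per call and maps IOException to HBaseRestException. A usage sketch, illustrative and not part of the patch; it assumes a running cluster, an existing table "mytable" with family "info:", and the conf/admin pair the Dispatcher normally constructs.

  // Illustrative fragment; table, family and row names are made up.
  void rowModelExample(HBaseConfiguration conf, HBaseAdmin admin)
      throws HBaseRestException {
    RowModel rows = new RowModel(conf, admin);
    BatchUpdate update = new BatchUpdate("row1");
    update.put("info:name", Bytes.toBytes("some value"));
    rows.post(Bytes.toBytes("mytable"), update);             // write
    RowResult latest = rows.get(Bytes.toBytes("mytable"),    // read back
        Bytes.toBytes("row1"));
    rows.delete(Bytes.toBytes("mytable"), Bytes.toBytes("row1"));
  }
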
index 000000000000..d8f17fcf9ddc --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerController.java @@ -0,0 +1,358 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.RowFilterSet; +import org.apache.hadoop.hbase.filter.StopRowFilter; +import org.apache.hadoop.hbase.filter.WhileMatchRowFilter; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.filter.FilterFactory; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * + */ +public class ScannerController extends AbstractController { + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#delete(org.apache.hadoop + * .hbase.rest.Status, byte[][], java.util.Map) + */ + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + ScannerModel innerModel = this.getModel(); + if (pathSegments.length == 3 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { + // get the scannerId + Integer scannerId = null; + String scannerIdString = new String(pathSegments[2]); + if (!Pattern.matches("^\\d+$", scannerIdString)) { + throw new HBaseRestException( + "the scannerid in the path and must be an integer"); + } + scannerId = Integer.parseInt(scannerIdString); + + try { + innerModel.scannerClose(scannerId); + s.setOK(); + } catch (HBaseRestException e) { + s.setNotFound(); + } + } else { + s.setBadRequest("invalid query"); + } + s.respond(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#generateModel(org.apache + * .hadoop.hbase.HBaseConfiguration, + * org.apache.hadoop.hbase.client.HBaseAdmin) + */ + @Override + protected AbstractModel generateModel(HBaseConfiguration conf, HBaseAdmin a) { + return new ScannerModel(conf, a); + } + + protected ScannerModel getModel() { + return (ScannerModel) model; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#get(org.apache.hadoop.hbase + * .rest.Status, byte[][], java.util.Map) + */ + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + + 
s.setBadRequest("invalid query"); + s.respond(); + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#post(org.apache.hadoop. + * hbase.rest.Status, byte[][], java.util.Map, byte[], + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser) + */ + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + ScannerModel innerModel = this.getModel(); + byte[] tableName; + tableName = pathSegments[0]; + + // Otherwise we interpret this request as a scanner request. + if (pathSegments.length == 2 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { // new scanner request + ScannerDescriptor sd = this.getScannerDescriptor(queryMap); + s.setScannerCreated(createScanner(innerModel, tableName, sd)); + } else if (pathSegments.length == 3 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { // open scanner request + // first see if the limit variable is present + Long numRows = 1L; + String[] numRowsString = queryMap.get(RESTConstants.LIMIT); + if (numRowsString != null && Pattern.matches("^\\d+$", numRowsString[0])) { + numRows = Long.parseLong(numRowsString[0]); + } + // get the scannerId + Integer scannerId = null; + String scannerIdString = new String(pathSegments[2]); + if (!Pattern.matches("^\\d+$", scannerIdString)) { + throw new HBaseRestException( + "the scannerid in the path and must be an integer"); + } + scannerId = Integer.parseInt(scannerIdString); + + try { + s.setOK(innerModel.scannerGet(scannerId, numRows)); + } catch (HBaseRestException e) { + s.setNotFound(); + } + } else { + s.setBadRequest("Unknown Query."); + } + s.respond(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#put(org.apache.hadoop.hbase + * .rest.Status, byte[][], java.util.Map, byte[], + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser) + */ + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + + s.setBadRequest("invalid query"); + s.respond(); + + } + + private ScannerDescriptor getScannerDescriptor(Map queryMap) { + long timestamp = 0L; + byte[] startRow = null; + byte[] stopRow = null; + String filters = null; + + String[] timeStampString = queryMap.get(RESTConstants.SCANNER_TIMESTAMP); + if (timeStampString != null && timeStampString.length == 1) { + timestamp = Long.parseLong(timeStampString[0]); + } + + String[] startRowString = queryMap.get(RESTConstants.SCANNER_START_ROW); + if (startRowString != null && startRowString.length == 1) { + startRow = Bytes.toBytes(startRowString[0]); + } + + String[] stopRowString = queryMap.get(RESTConstants.SCANNER_STOP_ROW); + if (stopRowString != null && stopRowString.length == 1) { + stopRow = Bytes.toBytes(stopRowString[0]); + } + + String[] filtersStrings = queryMap.get(RESTConstants.SCANNER_FILTER); + if (filtersStrings != null && filtersStrings.length > 0) { + filters = ""; + for (@SuppressWarnings("unused") + String filter : filtersStrings) { + // TODO filters are not hooked up yet... 
And the String should probably + // be changed to a set + } + } + return new ScannerDescriptor(this.getColumnsFromQueryMap(queryMap), + timestamp, startRow, stopRow, filters); + } + + protected ScannerIdentifier createScanner(ScannerModel innerModel, + byte[] tableName, ScannerDescriptor scannerDescriptor) + throws HBaseRestException { + + RowFilterInterface filterSet = null; + + // Might want to change this. I am doing this so that I can use + // a switch statement that is more efficient. + int switchInt = 0; + if (scannerDescriptor.getColumns() != null + && scannerDescriptor.getColumns().length > 0) { + switchInt += 1; + } + switchInt += (scannerDescriptor.getTimestamp() != 0L) ? (1 << 1) : 0; + switchInt += (scannerDescriptor.getStartRow().length > 0) ? (1 << 2) : 0; + switchInt += (scannerDescriptor.getStopRow().length > 0) ? (1 << 3) : 0; + if (scannerDescriptor.getFilters() != null + && !scannerDescriptor.getFilters().equals("")) { + switchInt += (scannerDescriptor.getFilters() != null) ? (1 << 4) : 0; + filterSet = unionFilters(scannerDescriptor.getFilters()); + } + + return scannerSwitch(switchInt, innerModel, tableName, scannerDescriptor + .getColumns(), scannerDescriptor.getTimestamp(), scannerDescriptor + .getStartRow(), scannerDescriptor.getStopRow(), filterSet); + } + + public ScannerIdentifier scannerSwitch(int switchInt, + ScannerModel innerModel, byte[] tableName, byte[][] columns, + long timestamp, byte[] startRow, byte[] stopRow, + RowFilterInterface filterSet) throws HBaseRestException { + switch (switchInt) { + case 0: + return innerModel.scannerOpen(tableName); + case 1: + return innerModel.scannerOpen(tableName, columns); + case 2: + return innerModel.scannerOpen(tableName, timestamp); + case 3: + return innerModel.scannerOpen(tableName, columns, timestamp); + case 4: + return innerModel.scannerOpen(tableName, startRow); + case 5: + return innerModel.scannerOpen(tableName, columns, startRow); + case 6: + return innerModel.scannerOpen(tableName, startRow, timestamp); + case 7: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp); + case 8: + return innerModel.scannerOpen(tableName, getStopRow(stopRow)); + case 9: + return innerModel.scannerOpen(tableName, columns, getStopRow(stopRow)); + case 10: + return innerModel.scannerOpen(tableName, timestamp, getStopRow(stopRow)); + case 11: + return innerModel.scannerOpen(tableName, columns, timestamp, + getStopRow(stopRow)); + case 12: + return innerModel.scannerOpen(tableName, startRow, getStopRow(stopRow)); + case 13: + return innerModel.scannerOpen(tableName, columns, startRow, + getStopRow(stopRow)); + case 14: + return innerModel.scannerOpen(tableName, startRow, timestamp, + getStopRow(stopRow)); + case 15: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp, + getStopRow(stopRow)); + case 16: + return innerModel.scannerOpen(tableName, filterSet); + case 17: + return innerModel.scannerOpen(tableName, columns, filterSet); + case 18: + return innerModel.scannerOpen(tableName, timestamp, filterSet); + case 19: + return innerModel.scannerOpen(tableName, columns, timestamp, filterSet); + case 20: + return innerModel.scannerOpen(tableName, startRow, filterSet); + case 21: + return innerModel.scannerOpen(tableName, columns, startRow, filterSet); + case 22: + return innerModel.scannerOpen(tableName, startRow, timestamp, filterSet); + case 23: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp, + filterSet); + case 24: + return innerModel.scannerOpen(tableName, 
getStopRowUnionFilter(stopRow, + filterSet)); + case 25: + return innerModel.scannerOpen(tableName, columns, getStopRowUnionFilter( + stopRow, filterSet)); + case 26: + return innerModel.scannerOpen(tableName, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + case 27: + return innerModel.scannerOpen(tableName, columns, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + case 28: + return innerModel.scannerOpen(tableName, startRow, getStopRowUnionFilter( + stopRow, filterSet)); + case 29: + return innerModel.scannerOpen(tableName, columns, startRow, + getStopRowUnionFilter(stopRow, filterSet)); + case 30: + return innerModel.scannerOpen(tableName, startRow, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + case 31: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + default: + return null; + } + } + + protected RowFilterInterface getStopRow(byte[] stopRow) { + return new WhileMatchRowFilter(new StopRowFilter(stopRow)); + } + + protected RowFilterInterface getStopRowUnionFilter(byte[] stopRow, + RowFilterInterface filter) { + Set filterSet = new HashSet(); + filterSet.add(getStopRow(stopRow)); + filterSet.add(filter); + return new RowFilterSet(filterSet); + } + + /** + * Given a list of filters in JSON string form, returns a RowSetFilter that + * returns true if all input filters return true on a Row (aka an AND + * statement). + * + * @param filters + * array of input filters in a JSON String + * @return RowSetFilter with all input filters in an AND Statement + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + protected RowFilterInterface unionFilters(String filters) + throws HBaseRestException { + FilterFactory f = RESTConstants.filterFactories.get("RowFilterSet"); + return f.getFilterFromJSON(filters); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java b/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java index c1df133481ae..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java @@ -1,339 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
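The switchInt built in createScanner() above packs the presence of each optional scanner argument into one small integer so a single switch can pick the matching ScannerModel.scannerOpen() overload. A worked example, illustrative and not part of the patch:

  // bit 0: columns given, bit 1: timestamp != 0, bit 2: start row given,
  // bit 3: stop row given, bit 4: filters given.
  boolean hasColumns = true, hasTimestamp = false, hasStartRow = true,
      hasStopRow = false, hasFilters = false;
  int switchInt = 0;
  if (hasColumns)   switchInt += 1;
  if (hasTimestamp) switchInt += 1 << 1;
  if (hasStartRow)  switchInt += 1 << 2;
  if (hasStopRow)   switchInt += 1 << 3;
  if (hasFilters)   switchInt += 1 << 4;
  // switchInt is now 5, so scannerSwitch() ends up calling
  // innerModel.scannerOpen(tableName, columns, startRow).
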
- */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.net.URLDecoder; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Scanner; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JenkinsHash; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.io.RowResult; -import org.znerd.xmlenc.XMLOutputter; - -/** - * ScannderHandler fields all scanner related requests. - */ -public class ScannerHandler extends GenericHandler { - private static final String ROWS = "rows"; - - public ScannerHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException{ - super(conf, admin); - } - - private class ScannerRecord { - private final Scanner scanner; - private List nextRows; - - ScannerRecord(final Scanner s) { - this.scanner = s; - nextRows = new ArrayList(); - } - - public Scanner getScanner() { - return this.scanner; - } - - public boolean hasNext(int nbRows) throws IOException { - if (nextRows.size() < nbRows) { - RowResult[] results = scanner.next(nbRows - nextRows.size()); - for (RowResult result : results) { - nextRows.add(result); - } - return nextRows.size() > 0; - } else { - return true; - } - } - - /** - * Call next on the scanner. - * @return Null if finished, RowResult otherwise - * @throws IOException - */ - public RowResult[] next(int nbRows) throws IOException { - if (!hasNext(nbRows)) { - return null; - } - RowResult[] temp = nextRows.toArray(new RowResult[nextRows.size()]); - nextRows.clear(); - return temp; - } - } - - /* - * Map of outstanding scanners keyed by scannerid. - */ - private final Map scanners = - new HashMap(); - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "GET to a scanner not supported."); - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - if (pathSegments.length == 2) { - // trying to create a scanner - openScanner(request, response, pathSegments); - } - else if (pathSegments.length == 3) { - // advancing a scanner - getScanner(request, response, pathSegments[2]); - } - else{ - doNotFound(response, "No handler for request"); - } - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doPost(request, response, pathSegments); - } - - public void doDelete(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - deleteScanner(response, pathSegments[2]); - } - - /* - * Advance scanner and return current position. 
- * @param request - * @param response - * @param scannerid - * @throws IOException - */ - private void getScanner(final HttpServletRequest request, - final HttpServletResponse response, final String scannerid) - throws IOException { - ScannerRecord sr = this.scanners.get(scannerid); - if (sr == null) { - doNotFound(response, "No such scanner."); - return; - } - - String limitString = request.getParameter(LIMIT); - int limit = 1; - if (limitString != null && limitString.length() > 0) { - limit = Integer.valueOf(limitString); - } - if (sr.hasNext(limit)) { - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - outputScannerEntryXML(response, sr, limit); - break; - case MIME: -/* outputScannerEntryMime(response, sr);*/ - doNotAcceptable(response); - break; - default: - doNotAcceptable(response); - } - } - else{ - this.scanners.remove(scannerid); - doNotFound(response, "Scanner is expended"); - } - } - - private void outputScannerEntryXML(final HttpServletResponse response, - final ScannerRecord sr, int limit) - throws IOException { - // respond with a 200 and Content-type: text/xml - setResponseHeader(response, 200, ContentType.XML.toString()); - - // setup an xml outputter - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - - boolean rows = false; - - if (limit > 1) { - outputter.startTag(ROWS); - rows = true; - } - - RowResult[] rowResults = sr.next(limit); - - for (RowResult rowResult: rowResults) { - outputter.startTag(ROW); - - // write the row key - doElement(outputter, "name", - org.apache.hadoop.hbase.util.Base64.encodeBytes(rowResult.getRow())); - - outputColumnsXml(outputter, rowResult); - outputter.endTag(); - } - - if (rows) { - outputter.endTag(); - } - - outputter.endDocument(); - outputter.getWriter().close(); - } - - // private void outputScannerEntryMime(final HttpServletResponse response, - // final ScannerRecord sr) - // throws IOException { - // response.setStatus(200); - // // This code ties me to the jetty server. - // MultiPartResponse mpr = new MultiPartResponse(response); - // // Content type should look like this for multipart: - // // Content-type: multipart/related;start="";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml" - // String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" + - // mpr.getBoundary() + "\""; - // // Setting content type is broken. I'm unable to set parameters on the - // // content-type; They get stripped. Can't set boundary, etc. - // // response.addHeader("Content-Type", ct); - // response.setContentType(ct); - // // Write row, key-column and timestamp each in its own part. - // mpr.startPart("application/octet-stream", - // new String [] {"Content-Description: row", - // "Content-Transfer-Encoding: binary", - // "Content-Length: " + sr.getKey().getRow().getBytes().length}); - // mpr.getOut().write(sr.getKey().getRow().getBytes()); - // - // // Usually key-column is empty when scanning. - // if (sr.getKey().getColumn() != null && - // sr.getKey().getColumn().getLength() > 0) { - // mpr.startPart("application/octet-stream", - // new String [] {"Content-Description: key-column", - // "Content-Transfer-Encoding: binary", - // "Content-Length: " + sr.getKey().getColumn().getBytes().length}); - // } - // mpr.getOut().write(sr.getKey().getColumn().getBytes()); - // // TODO: Fix. Need to write out the timestamp in the ordained timestamp - // // format. 
- // byte [] timestampBytes = Long.toString(sr.getKey().getTimestamp()).getBytes(); - // mpr.startPart("application/octet-stream", - // new String [] {"Content-Description: timestamp", - // "Content-Transfer-Encoding: binary", - // "Content-Length: " + timestampBytes.length}); - // mpr.getOut().write(timestampBytes); - // // Write out columns - // outputColumnsMime(mpr, sr.getValue()); - // mpr.close(); - // } - - /* - * Create scanner - * @param request - * @param response - * @param pathSegments - * @throws IOException - */ - private void openScanner(final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException, ServletException { - // get the table - HTable table = getTable(getTableName(pathSegments)); - - // get the list of columns we're supposed to interact with - String[] raw_columns = request.getParameterValues(COLUMN); - byte [][] columns = null; - - if (raw_columns != null) { - columns = new byte [raw_columns.length][]; - for (int i = 0; i < raw_columns.length; i++) { - // I think this decoding is redundant. - columns[i] = - Bytes.toBytes(URLDecoder.decode(raw_columns[i], HConstants.UTF8_ENCODING)); - } - } else { - // TODO: Need to put into the scanner all of the table's column - // families. TODO: Verify this returns all rows. For now just fail. - doMethodNotAllowed(response, "Unspecified columns parameter currently not supported!"); - return; - } - - // TODO: Parse according to the timestamp format we agree on. - String raw_ts = request.getParameter(TIMESTAMP); - - // TODO: Are these decodings redundant? - byte [] startRow = request.getParameter(START_ROW) == null? - HConstants.EMPTY_START_ROW: - Bytes.toBytes(URLDecoder.decode(request.getParameter(START_ROW), - HConstants.UTF8_ENCODING)); - // Empty start row is same value as empty end row. - byte [] endRow = request.getParameter(END_ROW) == null? - HConstants.EMPTY_START_ROW: - Bytes.toBytes(URLDecoder.decode(request.getParameter(END_ROW), - HConstants.UTF8_ENCODING)); - - Scanner scanner = (request.getParameter(END_ROW) == null)? - table.getScanner(columns, startRow): - table.getScanner(columns, startRow, endRow); - - // Make a scanner id by hashing the object toString value (object name + - // an id). Will make identifier less burdensome and more url friendly. 
- String scannerid = - Integer.toHexString(JenkinsHash.getInstance().hash(scanner.toString().getBytes(), -1)); - ScannerRecord sr = new ScannerRecord(scanner); - - // store the scanner for subsequent requests - this.scanners.put(scannerid, sr); - - // set a 201 (Created) header and a Location pointing to the new - // scanner - response.setStatus(201); - response.addHeader("Location", request.getContextPath() + "/" + - pathSegments[0] + "/" + pathSegments[1] + "/" + scannerid); - response.getOutputStream().close(); - } - - /* - * Delete scanner - * @param response - * @param scannerid - * @throws IOException - */ - private void deleteScanner(final HttpServletResponse response, - final String scannerid) - throws IOException, ServletException { - ScannerRecord sr = this.scanners.remove(scannerid); - if (sr == null) { - doNotFound(response, "No such scanner"); - } else { - sr.getScanner().close(); - response.setStatus(200); - response.getOutputStream().close(); - } - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java b/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java new file mode 100644 index 000000000000..a529eb318c80 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java @@ -0,0 +1,282 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
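Both the removed ScannerHandler and the new ScannerController expose scanners as /<table>/scanner (POST to create) and /<table>/scanner/<id> (POST to advance, DELETE to close), and the new controller reports the id back in a Location header via Status.setScannerCreated(). A client-side sketch follows; it is illustrative only, and the host, port and the hard-coded id "1" are assumptions.

  import java.net.HttpURLConnection;
  import java.net.URL;

  public class ScannerRestSketch {
    public static void main(String[] args) throws Exception {
      String base = "http://localhost:60050/mytable/scanner"; // host/port assumed
      // 1. create a scanner; the new id comes back in the Location header
      HttpURLConnection create = (HttpURLConnection) new URL(base).openConnection();
      create.setRequestMethod("POST");
      System.out.println("scanner at " + create.getHeaderField("Location"));
      // 2. advance it a few rows at a time ("1" stands in for the returned id)
      HttpURLConnection next =
          (HttpURLConnection) new URL(base + "/1?limit=10").openConnection();
      next.setRequestMethod("POST");
      System.out.println("HTTP " + next.getResponseCode());
      // 3. close it
      HttpURLConnection close =
          (HttpURLConnection) new URL(base + "/1").openConnection();
      close.setRequestMethod("DELETE");
      close.getResponseCode();
    }
  }
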
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Scanner; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + */ +public class ScannerModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TableModel.class); + + public ScannerModel(HBaseConfiguration config, HBaseAdmin admin) { + super.initialize(config, admin); + } + + // + // Normal Scanner + // + protected static class ScannerMaster { + + protected static Map scannerMap = new ConcurrentHashMap(); + protected static AtomicInteger nextScannerId = new AtomicInteger(1); + + public Integer addScanner(Scanner scanner) { + Integer i = new Integer(nextScannerId.getAndIncrement()); + scannerMap.put(i, scanner); + return i; + } + + public Scanner getScanner(Integer id) { + return scannerMap.get(id); + } + + public Scanner removeScanner(Integer id) { + return scannerMap.remove(id); + } + + /** + * @param id + * id of scanner to close + */ + public void scannerClose(Integer id) { + Scanner s = scannerMap.remove(id); + s.close(); + } + } + + protected static ScannerMaster scannerMaster = new ScannerMaster(); + + /** + * returns the next numResults RowResults from the Scaner mapped to Integer + * id. If the end of the table is reached, the scanner is closed and all + * succesfully retrieved rows are returned. + * + * @param id + * id target scanner is mapped to. + * @param numRows + * number of results to return. + * @return all successfully retrieved rows. + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public RowResult[] scannerGet(Integer id, Long numRows) + throws HBaseRestException { + try { + ArrayList a; + Scanner s; + RowResult r; + + a = new ArrayList(); + s = scannerMaster.getScanner(id); + + if (s == null) { + throw new HBaseRestException("ScannerId: " + id + + " is unavailable. Please create a new scanner"); + } + + for (int i = 0; i < numRows; i++) { + if ((r = s.next()) != null) { + a.add(r); + } else { + scannerMaster.scannerClose(id); + break; + } + } + + return a.toArray(new RowResult[0]); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + /** + * Returns all rows inbetween the scanners current position and the end of the + * table. 
+ * + * @param id + * id of scanner to use + * @return all rows till end of table + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public RowResult[] scannerGet(Integer id) throws HBaseRestException { + try { + ArrayList a; + Scanner s; + RowResult r; + + a = new ArrayList(); + s = scannerMaster.getScanner(id); + + while ((r = s.next()) != null) { + a.add(r); + } + + scannerMaster.scannerClose(id); + + return a.toArray(new RowResult[0]); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public boolean scannerClose(Integer id) throws HBaseRestException { + Scanner s = scannerMaster.removeScanner(id); + + if (s == null) { + throw new HBaseRestException("Scanner id: " + id + " does not exist"); + } + return true; + } + + // Scanner Open Methods + // No Columns + public ScannerIdentifier scannerOpen(byte[] tableName) + throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName)); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, long timestamp) + throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), timestamp); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow) + throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow, + long timestamp) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow, timestamp); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, long timestamp, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), timestamp, filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow, filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow, + long timestamp, RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow, timestamp, + filter); + } + + // With Columns + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, HConstants.EMPTY_START_ROW, timestamp))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns) + throws HBaseRestException { + return scannerOpen(tableName, columns, HConstants.LATEST_TIMESTAMP); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow, long timestamp) throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, startRow, timestamp))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow) throws HBaseRestException { + return scannerOpen(tableName, columns, startRow, + HConstants.LATEST_TIMESTAMP); + } + + public 
ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + long timestamp, RowFilterInterface filter) throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, HConstants.EMPTY_START_ROW, timestamp, filter))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, columns, HConstants.LATEST_TIMESTAMP, filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow, long timestamp, RowFilterInterface filter) + throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, startRow, timestamp, filter))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow, RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, columns, startRow, + HConstants.LATEST_TIMESTAMP, filter); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/Status.java b/src/java/org/apache/hadoop/hbase/rest/Status.java new file mode 100644 index 000000000000..9cc5e8572b40 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/Status.java @@ -0,0 +1,256 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
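The ScannerModel above keeps open scanners in a static ConcurrentHashMap keyed by AtomicInteger-generated ids, so a scanner created by one request can be advanced and closed by later ones. A programmatic sketch of that lifecycle, illustrative and not part of the patch; the table name and batch size are made up.

  void scannerModelExample(HBaseConfiguration conf, HBaseAdmin admin)
      throws HBaseRestException {
    ScannerModel scanners = new ScannerModel(conf, admin);
    byte[] table = Bytes.toBytes("mytable");
    // open over all column families at the latest timestamp
    ScannerIdentifier id = scanners.scannerOpen(table);
    // fetch up to the next ten rows
    RowResult[] batch = scanners.scannerGet(id.getId(), 10L);
    System.out.println(batch.length + " rows");
    // release the server-side scanner (scannerClose throws if the scanner
    // already closed itself because the end of the table was reached)
    scanners.scannerClose(id.getId());
  }
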
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.HashMap; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; +import org.apache.hadoop.hbase.util.Bytes; + +import agilejson.TOJSON; + +public class Status { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(Status.class); + + public static final HashMap statNames = new HashMap(); + + static { + statNames.put(HttpServletResponse.SC_CONTINUE, "continue"); + statNames.put(HttpServletResponse.SC_SWITCHING_PROTOCOLS, + "switching protocols"); + statNames.put(HttpServletResponse.SC_OK, "ok"); + statNames.put(HttpServletResponse.SC_CREATED, "created"); + statNames.put(HttpServletResponse.SC_ACCEPTED, "accepted"); + statNames.put(HttpServletResponse.SC_NON_AUTHORITATIVE_INFORMATION, + "non-authoritative information"); + statNames.put(HttpServletResponse.SC_NO_CONTENT, "no content"); + statNames.put(HttpServletResponse.SC_RESET_CONTENT, "reset content"); + statNames.put(HttpServletResponse.SC_PARTIAL_CONTENT, "partial content"); + statNames.put(HttpServletResponse.SC_MULTIPLE_CHOICES, "multiple choices"); + statNames + .put(HttpServletResponse.SC_MOVED_PERMANENTLY, "moved permanently"); + statNames + .put(HttpServletResponse.SC_MOVED_TEMPORARILY, "moved temporarily"); + statNames.put(HttpServletResponse.SC_FOUND, "found"); + statNames.put(HttpServletResponse.SC_SEE_OTHER, "see other"); + statNames.put(HttpServletResponse.SC_NOT_MODIFIED, "not modified"); + statNames.put(HttpServletResponse.SC_USE_PROXY, "use proxy"); + statNames.put(HttpServletResponse.SC_TEMPORARY_REDIRECT, + "temporary redirect"); + statNames.put(HttpServletResponse.SC_BAD_REQUEST, "bad request"); + statNames.put(HttpServletResponse.SC_UNAUTHORIZED, "unauthorized"); + statNames.put(HttpServletResponse.SC_FORBIDDEN, "forbidden"); + statNames.put(HttpServletResponse.SC_NOT_FOUND, "not found"); + statNames.put(HttpServletResponse.SC_METHOD_NOT_ALLOWED, + "method not allowed"); + statNames.put(HttpServletResponse.SC_NOT_ACCEPTABLE, "not acceptable"); + statNames.put(HttpServletResponse.SC_PROXY_AUTHENTICATION_REQUIRED, + "proxy authentication required"); + statNames.put(HttpServletResponse.SC_REQUEST_TIMEOUT, "request timeout"); + statNames.put(HttpServletResponse.SC_CONFLICT, "conflict"); + statNames.put(HttpServletResponse.SC_GONE, "gone"); + statNames.put(HttpServletResponse.SC_LENGTH_REQUIRED, "length required"); + statNames.put(HttpServletResponse.SC_PRECONDITION_FAILED, + "precondition failed"); + statNames.put(HttpServletResponse.SC_REQUEST_ENTITY_TOO_LARGE, + "request entity too large"); + statNames.put(HttpServletResponse.SC_REQUEST_URI_TOO_LONG, + "request uri too long"); + statNames.put(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, + "unsupported media type"); + statNames.put(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE, + "requested range not satisfiable"); + statNames.put(HttpServletResponse.SC_EXPECTATION_FAILED, + "expectation failed"); + statNames.put(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, + "internal server error"); + statNames.put(HttpServletResponse.SC_NOT_IMPLEMENTED, "not implemented"); + statNames.put(HttpServletResponse.SC_BAD_GATEWAY, "bad gateway"); + 
statNames.put(HttpServletResponse.SC_SERVICE_UNAVAILABLE, + "service unavailable"); + statNames.put(HttpServletResponse.SC_GATEWAY_TIMEOUT, "gateway timeout"); + statNames.put(HttpServletResponse.SC_HTTP_VERSION_NOT_SUPPORTED, + "http version not supported"); + } + protected int statusCode; + protected HttpServletResponse response; + protected Object message; + protected IRestSerializer serializer; + protected byte[][] pathSegments; + + public int getStatusCode() { + return statusCode; + } + + @TOJSON + public Object getMessage() { + return message; + } + + public static class StatusMessage implements ISerializable { + int statusCode; + boolean error; + Object reason; + + public StatusMessage(int statusCode, boolean error, Object o) { + this.statusCode = statusCode; + this.error = error; + reason = o; + } + + @TOJSON + public int getStatusCode() { + return statusCode; + } + + @TOJSON + public boolean getError() { + return error; + } + + @TOJSON + public Object getMessage() { + return reason; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML(org.apache.hadoop.hbase + * .rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeStatusMessage(this); + } + } + + public Status(HttpServletResponse r, IRestSerializer serializer, byte[][] bs) { + this.setOK(); + this.response = r; + this.serializer = serializer; + this.pathSegments = bs; + } + + // Good Messages + public void setOK() { + this.statusCode = HttpServletResponse.SC_OK; + this.message = new StatusMessage(HttpServletResponse.SC_OK, false, "success"); + } + + public void setOK(Object message) { + this.statusCode = HttpServletResponse.SC_OK; + this.message = message; + } + + public void setAccepted() { + this.statusCode = HttpServletResponse.SC_ACCEPTED; + this.message = new StatusMessage(HttpServletResponse.SC_ACCEPTED, false, "success"); + } + + public void setExists(boolean error) { + this.statusCode = HttpServletResponse.SC_CONFLICT; + this.message = new StatusMessage(statusCode, error, "table already exists"); + } + + public void setCreated() { + this.statusCode = HttpServletResponse.SC_CREATED; + this.setOK(); + } + + public void setScannerCreated(ScannerIdentifier scannerIdentifier) { + this.statusCode = HttpServletResponse.SC_OK; + this.message = scannerIdentifier; + response.addHeader("Location", "/" + Bytes.toString(pathSegments[0]) + + "/scanner/" + scannerIdentifier.getId()); + } + // Bad Messages + + public void setInternalError(Exception e) { + this.statusCode = HttpServletResponse.SC_INTERNAL_SERVER_ERROR; + this.message = new StatusMessage(statusCode, true, e); + } + + public void setNoQueryResults() { + this.statusCode = HttpServletResponse.SC_NOT_FOUND; + this.message = new StatusMessage(statusCode, true, "no query results"); + } + + public void setConflict(Object message) { + this.statusCode = HttpServletResponse.SC_CONFLICT; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setNotFound(Object message) { + this.statusCode = HttpServletResponse.SC_NOT_FOUND; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setBadRequest(Object message) { + this.statusCode = HttpServletResponse.SC_BAD_REQUEST; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setNotFound() { + setNotFound("Unable to find requested URI"); + } + + public void setMethodNotImplemented() { + this.statusCode = 
HttpServletResponse.SC_METHOD_NOT_ALLOWED; + this.message = new StatusMessage(statusCode, true, "method not implemented"); + } + + public void setInvalidURI() { + setInvalidURI("Invalid URI"); + } + + public void setInvalidURI(Object message) { + this.statusCode = HttpServletResponse.SC_BAD_REQUEST; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setUnsupportedMediaType(Object message) { + this.statusCode = HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setGone() { + this.statusCode = HttpServletResponse.SC_GONE; + this.message = new StatusMessage(statusCode, true, "item no longer available"); + } + + + // Utility + public void respond() throws HBaseRestException { + response.setStatus(this.statusCode); + this.serializer.writeOutput(this.message); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/TableController.java b/src/java/org/apache/hadoop/hbase/rest/TableController.java new file mode 100644 index 000000000000..54866f21d706 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TableController.java @@ -0,0 +1,170 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.ArrayList; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public class TableController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TableController.class); + + protected TableModel getModel() { + return (TableModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) { + return new TableModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TableModel innerModel = getModel(); + + byte[] tableName; + + tableName = pathSegments[0]; + if (pathSegments.length < 2) { + s.setOK(innerModel.getTableMetadata(Bytes.toString(tableName))); + } else { + if (Bytes.toString(pathSegments[1]).toLowerCase().equals(REGIONS)) { + s.setOK(innerModel.getTableRegions(Bytes.toString(tableName))); + } else { + s.setBadRequest("unknown query."); + } + } + s.respond(); + } + + /* + * (non-Javadoc) + * + * @param input column descriptor JSON. Should be of the form:
+   * {"column_families":[ { "name":STRING, "bloomfilter":BOOLEAN,
+   * "max_versions":INTEGER, "compression_type":STRING, "in_memory":BOOLEAN,
+   * "block_cache_enabled":BOOLEAN, "max_value_length":INTEGER,
+   * "time_to_live":INTEGER ]} 
If any of the json object fields (except + * name) are not included the default values will be included instead. The + * default values are:
 bloomfilter => false max_versions => 3
+   * compression_type => NONE in_memory => false block_cache_enabled => false
+   * max_value_length => 2147483647 time_to_live => Integer.MAX_VALUE 
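+   * For example, a request body such as the following (the family name
+   * "info" and the values shown here are purely illustrative) would create a
+   * single column family that keeps five versions per cell:
+   * {"column_families":[ { "name":"info", "max_versions":5,
+   * "compression_type":"NONE", "in_memory":false } ]}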
+ * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#post(org.apache.hadoop. + * hbase.rest.Status, byte[][], java.util.Map, byte[], + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser) + */ + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + TableModel innerModel = getModel(); + + byte[] tableName; + + if (pathSegments.length == 0) { + // If no input, we don't know columnfamily schema, so send + // no data + if (input.length == 0) { + s.setBadRequest("no data send with post request"); + } else { + HTableDescriptor htd = parser.getTableDescriptor(input); + // Send to innerModel. If iM returns false, means the + // table already exists so return conflict. + if (!innerModel.post(htd.getName(), htd)) { + s.setConflict("table already exists"); + } else { + // Otherwise successfully created table. Return "created":true + s.setCreated(); + } + } + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ENABLE)) { + tableName = pathSegments[0]; + innerModel.enableTable(tableName); + s.setAccepted(); + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.DISABLE)) { + tableName = pathSegments[0]; + innerModel.disableTable(tableName); + s.setAccepted(); + } else { + s.setBadRequest("Unknown Query."); + } + s.respond(); + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + if (pathSegments.length != 1) { + s.setBadRequest("must specifify the name of the table"); + s.respond(); + } else if (queryMap.size() > 0) { + s + .setBadRequest("no query string should be specified when updating a table"); + s.respond(); + } else { + ArrayList newColumns = parser + .getColumnDescriptors(input); + byte[] tableName = pathSegments[0]; + getModel().updateTable(Bytes.toString(tableName), newColumns); + s.setOK(); + s.respond(); + } + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TableModel innerModel = getModel(); + + byte[] tableName; + + tableName = pathSegments[0]; + + if (pathSegments.length == 1) { + if (!innerModel.delete(tableName)) { + s.setBadRequest("table does not exist"); + } else { + s.setAccepted(); + } + s.respond(); + } else { + + } + } + +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/rest/TableHandler.java b/src/java/org/apache/hadoop/hbase/rest/TableHandler.java index f5a111fcf281..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/TableHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/TableHandler.java @@ -1,416 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.util.Bytes; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.znerd.xmlenc.XMLOutputter; - - -/** - * TableHandler fields all requests that deal with an individual table. - * That means all requests that start with /api/[table_name]/... go to - * this handler. - */ -public class TableHandler extends GenericHandler { - public static final String DISABLE = "disable"; - public static final String ENABLE = "enable"; - - public TableHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException{ - super(conf, admin); - } - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - // if it's just table name, return the metadata - if (pathSegments.length == 1) { - getTableMetadata(request, response, pathSegments[0]); - } - else{ - HTable table = getTable(pathSegments[0]); - if (pathSegments[1].toLowerCase().equals(REGIONS)) { - // get a region list - getTableRegions(table, request, response); - } - else{ - doNotFound(response, "Not handled in TableHandler"); - } - } - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - if (pathSegments.length == 0 || pathSegments[0].length() <= 0) { - // if it's a creation operation - putTable(request, response, pathSegments); - } else { - // if it's a disable operation or enable operation - String tableName = pathSegments[0]; - if (pathSegments[1].toLowerCase().equals(DISABLE)) { - admin.disableTable(tableName); - } else if (pathSegments[1].toLowerCase().equals(ENABLE)) { - admin.enableTable(tableName); - } - response.setStatus(202); - } - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - updateTable(request, response, pathSegments); - } - - public void doDelete(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - deleteTable(request, response, pathSegments); - } - - /* - * Return region offsets. - * @param request - * @param response - */ - private void getTableRegions(HTable table, final HttpServletRequest request, - final HttpServletResponse response) - throws IOException { - // Presumption is that this.table has already been focused on target table. - byte [][] startKeys = table.getStartKeys(); - // Presumption is that this.table has already been set against target table - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - setResponseHeader(response, startKeys.length > 0? 
200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag("regions"); - for (int i = 0; i < startKeys.length; i++) { - doElement(outputter, "region", Bytes.toString(startKeys[i])); - } - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - break; - case PLAIN: - setResponseHeader(response, startKeys.length > 0? 200: 204, - ContentType.PLAIN.toString()); - PrintWriter out = response.getWriter(); - for (int i = 0; i < startKeys.length; i++) { - // TODO: Add in the server location. Is it needed? - out.print(Bytes.toString(startKeys[i])); - } - out.close(); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - /* - * Get table metadata. - * @param request - * @param response - * @param tableName - * @throws IOException - */ - private void getTableMetadata(final HttpServletRequest request, - final HttpServletResponse response, final String tableName) - throws IOException { - HTableDescriptor [] tables = this.admin.listTables(); - HTableDescriptor descriptor = null; - for (int i = 0; i < tables.length; i++) { - if (Bytes.toString(tables[i].getName()).equals(tableName)) { - descriptor = tables[i]; - break; - } - } - if (descriptor == null) { - doNotFound(response, "Table not found!"); - } else { - // Presumption is that this.table has already been set against target table - ContentType type = ContentType.getContentType(request.getHeader(ACCEPT)); - switch (type) { - case XML: - setResponseHeader(response, 200, ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag("table"); - doElement(outputter, "name", Bytes.toString(descriptor.getName())); - outputter.startTag("columnfamilies"); - for (HColumnDescriptor e: descriptor.getFamilies()) { - outputter.startTag("columnfamily"); - doElement(outputter, "name", Bytes.toString(e.getName())); - doElement(outputter, "compression", e.getCompression().toString()); - doElement(outputter, "bloomfilter", - Boolean.toString(e.isBloomfilter())); - doElement(outputter, "max-versions", - Integer.toString(e.getMaxVersions())); - doElement(outputter, "maximum-cell-size", - Integer.toString(e.getMaxValueLength())); - outputter.endTag(); - } - outputter.endTag(); - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - break; - case PLAIN: - setResponseHeader(response, 200, ContentType.PLAIN.toString()); - PrintWriter out = response.getWriter(); - out.print(descriptor.toString()); - out.close(); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - } - - private void putTable(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws IOException, ServletException { - switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) { - case XML: - putTableXml(request, response, pathSegments); - break; - case MIME: - doNotAcceptable(response, "Don't support multipart/related yet..."); - break; - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - - private void updateTable(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws IOException, ServletException { - switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) { - case XML: - 
updateTableXml(request, response, pathSegments); - break; - case MIME: - doNotAcceptable(response, "Don't support multipart/related yet..."); - break; - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - - private void deleteTable(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws ServletException { - try { - String tableName = pathSegments[0]; - String[] column_params = request.getParameterValues(COLUMN); - if (column_params != null && column_params.length > 0) { - for (String column : column_params) { - admin.deleteColumn(tableName, makeColumnName(column)); - } - } else { - admin.deleteTable(tableName); - } - response.setStatus(202); - } catch (Exception e) { - throw new ServletException(e); - } - } - - private void putTableXml(HttpServletRequest - request, HttpServletResponse response, String[] pathSegments) - throws IOException, ServletException { - DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory - .newInstance(); - // ignore all comments inside the xml file - docBuilderFactory.setIgnoringComments(true); - - DocumentBuilder builder = null; - Document doc = null; - - try { - builder = docBuilderFactory.newDocumentBuilder(); - doc = builder.parse(request.getInputStream()); - } catch (javax.xml.parsers.ParserConfigurationException e) { - throw new ServletException(e); - } catch (org.xml.sax.SAXException e) { - throw new ServletException(e); - } - - try { - Node name_node = doc.getElementsByTagName("name").item(0); - String table_name = name_node.getFirstChild().getNodeValue(); - - HTableDescriptor htd = new HTableDescriptor(table_name); - NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); - for (int i = 0; i < columnfamily_nodes.getLength(); i++) { - Element columnfamily = (Element)columnfamily_nodes.item(i); - htd.addFamily(putColumnFamilyXml(columnfamily)); - } - admin.createTable(htd); - } catch (Exception e) { - throw new ServletException(e); - } - } - - private void updateTableXml(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) throws IOException, - ServletException { - DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory - .newInstance(); - // ignore all comments inside the xml file - docBuilderFactory.setIgnoringComments(true); - - DocumentBuilder builder = null; - Document doc = null; - - try { - builder = docBuilderFactory.newDocumentBuilder(); - doc = builder.parse(request.getInputStream()); - } catch (javax.xml.parsers.ParserConfigurationException e) { - throw new ServletException(e); - } catch (org.xml.sax.SAXException e) { - throw new ServletException(e); - } - - try { - String tableName = pathSegments[0]; - HTableDescriptor htd = admin.getTableDescriptor(tableName); - - NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); - - for (int i = 0; i < columnfamily_nodes.getLength(); i++) { - Element columnfamily = (Element) columnfamily_nodes.item(i); - HColumnDescriptor hcd = putColumnFamilyXml(columnfamily, htd); - if (htd.hasFamily(Bytes.toBytes(hcd.getNameAsString()))) { - admin.modifyColumn(tableName, hcd.getNameAsString(), hcd); - } else { - admin.addColumn(tableName, hcd); - } - } - } catch (Exception e) { - throw new ServletException(e); - } - } - - private HColumnDescriptor putColumnFamilyXml(Element columnfamily) { - return putColumnFamilyXml(columnfamily, null); - } - - private HColumnDescriptor putColumnFamilyXml(Element columnfamily, HTableDescriptor 
currentTDesp) { - Node name_node = columnfamily.getElementsByTagName("name").item(0); - String colname = makeColumnName(name_node.getFirstChild().getNodeValue()); - - int max_versions = HColumnDescriptor.DEFAULT_VERSIONS; - CompressionType compression = HColumnDescriptor.DEFAULT_COMPRESSION; - boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY; - boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE; - int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH; - int ttl = HColumnDescriptor.DEFAULT_TTL; - boolean bloomfilter = HColumnDescriptor.DEFAULT_BLOOMFILTER; - - if (currentTDesp != null) { - HColumnDescriptor currentCDesp = currentTDesp.getFamily(Bytes.toBytes(colname)); - if (currentCDesp != null) { - max_versions = currentCDesp.getMaxVersions(); - compression = currentCDesp.getCompression(); - in_memory = currentCDesp.isInMemory(); - block_cache = currentCDesp.isBlockCacheEnabled(); - max_cell_size = currentCDesp.getMaxValueLength(); - ttl = currentCDesp.getTimeToLive(); - bloomfilter = currentCDesp.isBloomfilter(); - } - } - - NodeList max_versions_list = columnfamily.getElementsByTagName("max-versions"); - if (max_versions_list.getLength() > 0) { - max_versions = Integer.parseInt(max_versions_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList compression_list = columnfamily.getElementsByTagName("compression"); - if (compression_list.getLength() > 0) { - compression = CompressionType.valueOf(compression_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList in_memory_list = columnfamily.getElementsByTagName("in-memory"); - if (in_memory_list.getLength() > 0) { - in_memory = Boolean.valueOf(in_memory_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList block_cache_list = columnfamily.getElementsByTagName("block-cache"); - if (block_cache_list.getLength() > 0) { - block_cache = Boolean.valueOf(block_cache_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList max_cell_size_list = columnfamily.getElementsByTagName("max-cell-size"); - if (max_cell_size_list.getLength() > 0) { - max_cell_size = Integer.valueOf(max_cell_size_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList ttl_list = columnfamily.getElementsByTagName("time-to-live"); - if (ttl_list.getLength() > 0) { - ttl = Integer.valueOf(ttl_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList bloomfilter_list = columnfamily.getElementsByTagName("bloomfilter"); - if (bloomfilter_list.getLength() > 0) { - bloomfilter = Boolean.valueOf(bloomfilter_list.item(0).getFirstChild().getNodeValue()); - } - - HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(colname), max_versions, - compression, in_memory, block_cache, max_cell_size, ttl, bloomfilter); - - NodeList metadataList = columnfamily.getElementsByTagName("metadata"); - for (int i = 0; i < metadataList.getLength(); i++) { - Element metadataColumn = (Element)metadataList.item(i); - // extract the name and value children - Node mname_node = metadataColumn.getElementsByTagName("name").item(0); - String mname = mname_node.getFirstChild().getNodeValue(); - Node mvalue_node = metadataColumn.getElementsByTagName("value").item(0); - String mvalue = mvalue_node.getFirstChild().getNodeValue(); - hcd.setValue(mname, mvalue); - } - - return hcd; - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/TableModel.java b/src/java/org/apache/hadoop/hbase/rest/TableModel.java new file mode 100644 index 000000000000..2202474fd199 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TableModel.java @@ -0,0 +1,280 @@ +/** 
+ * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Scanner; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; +import org.apache.hadoop.hbase.util.Bytes; + +import agilejson.TOJSON; + +public class TableModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TableModel.class); + + public TableModel(HBaseConfiguration config, HBaseAdmin admin) { + super.initialize(config, admin); + } + + // Get Methods + public RowResult[] get(byte[] tableName) throws HBaseRestException { + return get(tableName, getColumns(tableName)); + } + + /** + * Returns all cells from all rows from the given table in the given columns. + * The output is in the order that the columns are given. 
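+ * For example (an illustrative call; the table and column names here are
+ * hypothetical):
+ * RowResult[] rows = get(Bytes.toBytes("mytable"),
+ *     new byte[][] { Bytes.toBytes("info:") });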
+ * + * @param tableName + * table name + * @param columnNames + * column names + * @return resultant rows + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public RowResult[] get(byte[] tableName, byte[][] columnNames) + throws HBaseRestException { + try { + ArrayList a = new ArrayList(); + HTable table = new HTable(tableName); + + Scanner s = table.getScanner(columnNames); + RowResult r; + + while ((r = s.next()) != null) { + a.add(r); + } + + return a.toArray(new RowResult[0]); + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + protected boolean doesTableExist(byte[] tableName) throws HBaseRestException { + try { + return this.admin.tableExists(tableName); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + protected void disableTable(byte[] tableName) throws HBaseRestException { + try { + this.admin.disableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("IOException disabling table", e); + } + } + + protected void enableTable(byte[] tableName) throws HBaseRestException { + try { + this.admin.enableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("IOException enabiling table", e); + } + } + + public boolean updateTable(String tableName, + ArrayList columns) throws HBaseRestException { + HTableDescriptor htc = null; + try { + htc = this.admin.getTableDescriptor(tableName); + } catch (IOException e) { + throw new HBaseRestException("Table does not exist"); + } + + for (HColumnDescriptor column : columns) { + if (htc.hasFamily(Bytes.toBytes(column.getNameAsString()))) { + try { + this.admin.disableTable(tableName); + this.admin.modifyColumn(tableName, column.getNameAsString(), column); + this.admin.enableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("unable to modify column " + + column.getNameAsString(), e); + } + } else { + try { + this.admin.disableTable(tableName); + this.admin.addColumn(tableName, column); + this.admin.enableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("unable to add column " + + column.getNameAsString(), e); + } + } + } + + return true; + + } + + /** + * Get table metadata. + * + * @param request + * @param response + * @param tableName + * @throws IOException + */ + public HTableDescriptor getTableMetadata(final String tableName) + throws HBaseRestException { + HTableDescriptor descriptor = null; + try { + HTableDescriptor[] tables = this.admin.listTables(); + for (int i = 0; i < tables.length; i++) { + if (Bytes.toString(tables[i].getName()).equals(tableName)) { + descriptor = tables[i]; + break; + } + } + if (descriptor == null) { + + } else { + return descriptor; + } + } catch (IOException e) { + throw new HBaseRestException("error processing request."); + } + return descriptor; + } + + /** + * Return region offsets. + * + * @param request + * @param response + */ + public Regions getTableRegions(final String tableName) + throws HBaseRestException { + try { + HTable table = new HTable(this.conf, tableName); + // Presumption is that this.table has already been focused on target + // table. + Regions regions = new Regions(table.getStartKeys()); + // Presumption is that this.table has already been set against target + // table + return regions; + } catch (IOException e) { + throw new HBaseRestException("Unable to get regions from table"); + } + } + + // Post Methods + /** + * Creates table tableName described by the json in input. 
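+ * (The JSON request body has already been parsed into the supplied
+ * HTableDescriptor by the calling controller; this method only checks
+ * whether the table exists and creates it when it does not.)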
+ * + * @param tableName + * table name + * @param htd + * HBaseTableDescriptor for the table to be created + * + * @return true if operation does not fail due to a table with the given + * tableName not existing. + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public boolean post(byte[] tableName, HTableDescriptor htd) + throws HBaseRestException { + try { + if (!this.admin.tableExists(tableName)) { + this.admin.createTable(htd); + return true; + } + } catch (IOException e) { + throw new HBaseRestException(e); + } + return false; + } + + /** + * Deletes table tableName + * + * @param tableName + * name of the table. + * @return true if table exists and deleted, false if table does not exist. + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public boolean delete(byte[] tableName) throws HBaseRestException { + try { + if (this.admin.tableExists(tableName)) { + this.admin.disableTable(tableName); + this.admin.deleteTable(tableName); + return true; + } + return false; + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + public class Regions implements ISerializable { + byte[][] regionKey; + + public Regions(byte[][] bs) { + super(); + this.regionKey = bs; + } + + @SuppressWarnings("unused") + private Regions() { + } + + /** + * @return the regionKey + */ + @TOJSON(fieldName = "region") + public byte[][] getRegionKey() { + return regionKey; + } + + /** + * @param regionKey + * the regionKey to set + */ + public void setRegionKey(byte[][] regionKey) { + this.regionKey = regionKey; + } + + /* + * (non-Javadoc) + * + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeRegionData(this); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/TimestampController.java b/src/java/org/apache/hadoop/hbase/rest/TimestampController.java new file mode 100644 index 000000000000..d43b1bb6eb49 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TimestampController.java @@ -0,0 +1,139 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public class TimestampController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TimestampController.class); + + protected TimestampModel getModel() { + return (TimestampModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) { + return new TimestampModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TimestampModel innerModel = getModel(); + + byte[] tableName; + byte[] rowName; + long timestamp; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + timestamp = Bytes.toLong(pathSegments[3]); + + if (queryMap.size() == 0) { + s.setOK(innerModel.get(tableName, rowName, timestamp)); + } else { + // get the column names if any were passed in + String[] column_params = queryMap.get(RESTConstants.COLUMN); + byte[][] columns = null; + + if (column_params != null && column_params.length > 0) { + List available_columns = new ArrayList(); + for (String column_param : column_params) { + available_columns.add(column_param); + } + columns = Bytes.toByteArrays(available_columns.toArray(new String[0])); + } + s.setOK(innerModel.get(tableName, rowName, columns, timestamp)); + } + s.respond(); + } + + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + TimestampModel innerModel = getModel(); + + byte[] tableName; + byte[] rowName; + byte[] columnName; + long timestamp; + + tableName = pathSegments[0]; + rowName = pathSegments[1]; + columnName = pathSegments[2]; + timestamp = Bytes.toLong(pathSegments[3]); + + try { + if (queryMap.size() == 0) { + innerModel.post(tableName, rowName, columnName, timestamp, input); + s.setOK(); + } else { + s.setUnsupportedMediaType("Unknown Query."); + } + } catch (HBaseRestException e) { + s.setUnsupportedMediaType(e.getMessage()); + } + s.respond(); + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + throw new UnsupportedOperationException("Not supported yet."); + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TimestampModel innerModel = getModel(); + + byte[] tableName; + byte[] rowName; + long timestamp; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + timestamp = Bytes.toLong(pathSegments[3]); + + if (queryMap.size() == 0) { + innerModel.delete(tableName, rowName, timestamp); + } else { + innerModel.delete(tableName, rowName, this + .getColumnsFromQueryMap(queryMap), timestamp); + } + s.setAccepted(); + s.respond(); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java b/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java new file mode 100644 index 000000000000..65dff21752dd --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java 
@@ -0,0 +1,126 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +public class TimestampModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TimestampModel.class); + + public TimestampModel(HBaseConfiguration conf, HBaseAdmin admin) { + super.initialize(conf, admin); + } + + public void delete(byte[] tableName, byte[] rowName, long timestamp) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.deleteAll(rowName, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void delete(byte[] tableName, byte[] rowName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + for (byte[] column : columns) { + table.deleteAll(rowName, column, timestamp); + } + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public Cell get(byte[] tableName, byte[] rowName, byte[] columnName, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.get(rowName, columnName, timestamp, 1)[0]; + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public Cell[] get(byte[] tableName, byte[] rowName, byte[] columnName, + long timestamp, int numVersions) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.get(rowName, columnName, timestamp, numVersions); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, columns, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + /** + * @param tableName + * @param rowName + * @param timestamp + * @return + * @throws HBaseRestException + */ + public RowResult get(byte[] tableName, byte[] rowName, long timestamp) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void 
post(byte[] tableName, byte[] rowName, byte[] columnName, + long timestamp, byte[] value) throws HBaseRestException { + try { + HTable table; + BatchUpdate b; + + table = new HTable(tableName); + b = new BatchUpdate(rowName, timestamp); + + b.put(columnName, value); + table.commit(b); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java new file mode 100644 index 000000000000..1430396cbc5a --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java @@ -0,0 +1,103 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.descriptors; + +import org.apache.hadoop.hbase.io.Cell; + +import agilejson.TOJSON; + +/** + * + */ +public class RestCell extends Cell { + + byte[] name; + + + + /** + * + */ + public RestCell() { + super(); + // TODO Auto-generated constructor stub + } + + /** + * + */ + public RestCell(byte[] name, Cell cell) { + super(cell.getValue(), cell.getTimestamp()); + this.name = name; + } + + /** + * @param value + * @param timestamp + */ + public RestCell(byte[] value, long timestamp) { + super(value, timestamp); + // TODO Auto-generated constructor stub + } + + /** + * @param vals + * @param ts + */ + public RestCell(byte[][] vals, long[] ts) { + super(vals, ts); + // TODO Auto-generated constructor stub + } + + /** + * @param value + * @param timestamp + */ + public RestCell(String value, long timestamp) { + super(value, timestamp); + // TODO Auto-generated constructor stub + } + + /** + * @param vals + * @param ts + */ + public RestCell(String[] vals, long[] ts) { + super(vals, ts); + // TODO Auto-generated constructor stub + } + + /** + * @return the name + */ + @TOJSON(base64=true) + public byte[] getName() { + return name; + } + + /** + * @param name the name to set + */ + public void setName(byte[] name) { + this.name = name; + } + + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java new file mode 100644 index 000000000000..44010555fd7d --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java @@ -0,0 +1,74 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.descriptors; + +import java.util.HashMap; +import java.util.Map; + +/** + * + */ +public class RowUpdateDescriptor { + private String tableName; + private String rowName; + private Map colvals = new HashMap(); + + public RowUpdateDescriptor(String tableName, String rowName) { + this.tableName = tableName; + this.rowName = rowName; + } + + public RowUpdateDescriptor() {} + + /** + * @return the tableName + */ + public String getTableName() { + return tableName; + } + + /** + * @param tableName the tableName to set + */ + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return the rowName + */ + public String getRowName() { + return rowName; + } + + /** + * @param rowName the rowName to set + */ + public void setRowName(String rowName) { + this.rowName = rowName; + } + + /** + * @return the test + */ + public Map getColVals() { + return colvals; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java new file mode 100644 index 000000000000..2cddabe44a1a --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java @@ -0,0 +1,130 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.descriptors; + +/** + * + */ +public class ScannerDescriptor { + byte[][] columns; + long timestamp; + byte[] startRow; + byte[] stopRow; + String filters; + + /** + * @param columns + * @param timestamp + * @param startRow + * @param stopRow + * @param filters + */ + public ScannerDescriptor(byte[][] columns, long timestamp, byte[] startRow, + byte[] stopRow, String filters) { + super(); + this.columns = columns; + this.timestamp = timestamp; + this.startRow = startRow; + this.stopRow = stopRow; + this.filters = filters; + + if(this.startRow == null) { + this.startRow = new byte[0]; + } + if(this.stopRow == null) { + this.stopRow = new byte[0]; + } + } + + /** + * @return the columns + */ + public byte[][] getColumns() { + return columns; + } + + /** + * @param columns + * the columns to set + */ + public void setColumns(byte[][] columns) { + this.columns = columns; + } + + /** + * @return the timestamp + */ + public long getTimestamp() { + return timestamp; + } + + /** + * @param timestamp + * the timestamp to set + */ + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + /** + * @return the startRow + */ + public byte[] getStartRow() { + return startRow; + } + + /** + * @param startRow + * the startRow to set + */ + public void setStartRow(byte[] startRow) { + this.startRow = startRow; + } + + /** + * @return the stopRow + */ + public byte[] getStopRow() { + return stopRow; + } + + /** + * @param stopRow + * the stopRow to set + */ + public void setStopRow(byte[] stopRow) { + this.stopRow = stopRow; + } + + /** + * @return the filters + */ + public String getFilters() { + return filters; + } + + /** + * @param filters + * the filters to set + */ + public void setFilters(String filters) { + this.filters = filters; + } +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java new file mode 100644 index 000000000000..168472afa7b1 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java @@ -0,0 +1,96 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.descriptors; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; + +import agilejson.TOJSON; + +/** + * + */ +public class ScannerIdentifier implements ISerializable { + Integer id; + Long numRows; + + /** + * @param id + */ + public ScannerIdentifier(Integer id) { + super(); + this.id = id; + } + + /** + * @param id + * @param numRows + */ + public ScannerIdentifier(Integer id, Long numRows) { + super(); + this.id = id; + this.numRows = numRows; + } + + /** + * @return the id + */ + @TOJSON + public Integer getId() { + return id; + } + + /** + * @param id + * the id to set + */ + public void setId(Integer id) { + this.id = id; + } + + /** + * @return the numRows + */ + public Long getNumRows() { + return numRows; + } + + /** + * @param numRows + * the numRows to set + */ + public void setNumRows(Long numRows) { + this.numRows = numRows; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML(org.apache.hadoop.hbase + * .rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeScannerIdentifier(this); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java new file mode 100644 index 000000000000..9125c807a85a --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java @@ -0,0 +1,67 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.descriptors; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * + */ +public class TimestampsDescriptor implements ISerializable { + Map timestamps = new HashMap(); + + public void add(long timestamp, byte[] tableName, byte[] rowName) { + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(tableName)); + sb.append("/row/"); + sb.append(Bytes.toString(rowName)); + sb.append('/'); + sb.append(timestamp); + + timestamps.put(timestamp, sb.toString()); + } + + /** + * @return the timestamps + */ + public Map getTimestamps() { + return timestamps; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org + * .apache.hadoop.hbase.rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeTimestamps(this); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java b/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java new file mode 100644 index 000000000000..a938534fd99c --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java @@ -0,0 +1,86 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.exception; + +import agilejson.TOJSON; + +public class HBaseRestException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 8481585437124298646L; + private Exception innerException; + private String innerClass; + private String innerMessage; + + public HBaseRestException() { + + } + + public HBaseRestException(Exception e) throws HBaseRestException { + if (HBaseRestException.class.isAssignableFrom(e.getClass())) { + throw ((HBaseRestException) e); + } + setInnerException(e); + innerClass = e.getClass().toString(); + innerMessage = e.getMessage(); + } + + /** + * @param message + */ + public HBaseRestException(String message) { + super(message); + innerMessage = message; + } + + public HBaseRestException(String message, Exception exception) { + super(message, exception); + setInnerException(exception); + innerClass = exception.getClass().toString(); + innerMessage = message; + } + + @TOJSON + public String getInnerClass() { + return this.innerClass; + } + + @TOJSON + public String getInnerMessage() { + return this.innerMessage; + } + + /** + * @param innerException + * the innerException to set + */ + public void setInnerException(Exception innerException) { + this.innerException = innerException; + } + + /** + * @return the innerException + */ + public Exception getInnerException() { + return innerException; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java new file mode 100644 index 000000000000..7af652d4cf27 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java @@ -0,0 +1,66 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.ColumnValueFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * FilterFactory that constructs a ColumnValueFilter from a JSON arg String. 
+ * Expects a Stringified JSON argument with the following form: + * + * { "column_name" : "MY_COLUMN_NAME", "compare_op" : "INSERT_COMPARE_OP_HERE", + * "value" : "MY_COMPARE_VALUE" } + * + * The current valid compare ops are: equal, greater, greater_or_equal, less, + * less_or_equal, not_equal + */ +public class ColumnValueFilterFactory implements FilterFactory { + + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException { + JSONObject innerJSON; + String columnName; + String compareOp; + String value; + + try { + innerJSON = new JSONObject(args); + } catch (JSONException e) { + throw new HBaseRestException(e); + } + + if ((columnName = innerJSON.optString(COLUMN_NAME)) == null) { + throw new MalformedFilterException(); + } + if ((compareOp = innerJSON.optString(COMPARE_OP)) == null) { + throw new MalformedFilterException(); + } + if ((value = innerJSON.optString(VALUE)) == null) { + throw new MalformedFilterException(); + } + + return new ColumnValueFilter(columnName.getBytes(), + ColumnValueFilter.CompareOp.valueOf(compareOp), value.getBytes()); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java new file mode 100644 index 000000000000..00803c12a337 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java @@ -0,0 +1,71 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * Constructs Filters from JSON. Filters are defined + * as JSON Objects of the form: + * { + * "type" : "FILTER_CLASS_NAME", + * "args" : "FILTER_ARGUMENTS" + * } + * + * For Filters like WhileMatchRowFilter, + * nested Filters are supported. Just serialize a different + * filter in the form (for instance if you wanted to use WhileMatchRowFilter + * with a StopRowFilter: + * + * { + * "type" : "WhileMatchRowFilter", + * "args" : { + * "type" : "StopRowFilter", + * "args" : "ROW_KEY_TO_STOP_ON" + * } + * } + * + * For filters like RowSetFilter, nested Filters AND Filter arrays + * are supported. 
So, for instance, if one wanted to do a RegExp + * RowFilter UNIONed with a WhileMatchRowFilter(StopRowFilter), + * it would look like this: + * + * { + * "type" : "RowFilterSet", + * "args" : [ + * { + * "type" : "RegExpRowFilter", + * "args" : "MY_REGULAR_EXPRESSION" + * }, + * { + * "type" : "WhileMatchRowFilter", + * "args" : { + * "type" : "StopRowFilter", + * "args" : "MY_STOP_ROW_EXPRESSION" + * } + * } + * ] + * } + */ +public interface FilterFactory extends FilterFactoryConstants { + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java new file mode 100644 index 000000000000..e41d0d77dc57 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java @@ -0,0 +1,41 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +public interface FilterFactoryConstants { + static String TYPE = "type"; + static String ARGUMENTS = "args"; + static String COLUMN_NAME = "column_name"; + static String COMPARE_OP = "compare_op"; + static String VALUE = "value"; + + static class MalformedFilterException extends HBaseRestException { + + public MalformedFilterException() { + } + + @Override + public String toString() { + return "malformed filter expression"; + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java new file mode 100644 index 000000000000..65392135c251 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java @@ -0,0 +1,37 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.InclusiveStopRowFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * FilterFactory that construct a InclusiveStopRowFilter + * from a JSON argument String. + * + * It expects that the whole input string consists of only + * the rowKey that you wish to stop on. + */ +public class InclusiveStopRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new InclusiveStopRowFilter(Bytes.toBytes(args)); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java new file mode 100644 index 000000000000..35b8a4d63f8e --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java @@ -0,0 +1,34 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.PageRowFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; + +/** + * Constructs a PageRowFilter from a JSON argument String. + * Expects the entire JSON argument string to consist + * of the long that is the length of the pages that you want. + */ +public class PageRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new PageRowFilter(Long.parseLong(args)); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java new file mode 100644 index 000000000000..df72f30acdc1 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java @@ -0,0 +1,34 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RegExpRowFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; + +/** + * Constructs a RegExpRowFilter from a JSON argument string. + * Expects the entire JSON arg string to consist of the + * entire regular expression to be used. + */ +public class RegExpRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new RegExpRowFilter(args); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java new file mode 100644 index 000000000000..603ad64b2d49 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java @@ -0,0 +1,115 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import java.util.HashSet; +import java.util.Set; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.RowFilterSet; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.filter.FilterFactoryConstants.MalformedFilterException; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * Constructs a RowFilterSet from a JSON argument String. + * + * Assumes that the input is a JSONArray consisting of JSON Object version of + * the filters that you wish to mash together in an AND statement. + * + * The Syntax for the individual inner filters are defined by their respective + * FilterFactory. If a filter factory for said Factory does not exist, a + * MalformedFilterJSONException will be thrown. + * + * Currently OR Statements are not supported even though at a later iteration + * they could be supported easily. + */ +public class RowFilterSetFactory implements FilterFactory { + + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException { + JSONArray filterArray; + Set set; + JSONObject filter; + + try { + filterArray = new JSONArray(args); + } catch (JSONException e) { + throw new HBaseRestException(e); + } + + // If only 1 Row, just return the row. 
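+    // For example (values illustrative, assuming a "PageRowFilter" factory is
+    // registered in RESTConstants.filterFactories), an "args" array of
+    //   [ { "type" : "PageRowFilter", "args" : "10" } ]
+    // short-circuits here and returns that single PageRowFilter.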
+ if (filterArray.length() == 1) { + return getRowFilter(filterArray.optJSONObject(0)); + } + + // Otherwise continue + set = new HashSet(); + + for (int i = 0; i < filterArray.length(); i++) { + + // Get FIlter Object + if ((filter = filterArray.optJSONObject(i)) == null) { + throw new MalformedFilterException(); + } + + // Add newly constructed filter to the filter set; + set.add(getRowFilter(filter)); + } + + // Put set into a RowFilterSet and return. + return new RowFilterSet(set); + } + + /** + * A refactored method that encapsulates the creation of a RowFilter given a + * JSONObject with a correct form of: { "type" : "MY_TYPE", "args" : MY_ARGS, + * } + * + * @param filter + * @return + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + protected RowFilterInterface getRowFilter(JSONObject filter) + throws HBaseRestException { + FilterFactory f; + String filterType; + String filterArgs; + + // Get Filter's Type + if ((filterType = filter.optString(FilterFactoryConstants.TYPE)) == null) { + throw new MalformedFilterException(); + } + + // Get Filter Args + if ((filterArgs = filter.optString(FilterFactoryConstants.ARGUMENTS)) == null) { + throw new MalformedFilterException(); + } + + // Get Filter Factory for given Filter Type + if ((f = RESTConstants.filterFactories.get(filterType)) == null) { + throw new MalformedFilterException(); + } + + return f.getFilterFromJSON(filterArgs); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java new file mode 100644 index 000000000000..28caaf62f4b3 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java @@ -0,0 +1,37 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.StopRowFilter; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * FilterFactory that construct a StopRowFilter + * from an Argument String. + * + * It expects that the whole input string consists of only + * the rowKey that you wish to stop on. 
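+ *
+ * For example, when this filter is nested inside another filter definition
+ * (row key value illustrative only):
+ *
+ *   { "type" : "StopRowFilter", "args" : "MY_STOP_ROW" }
+ *
+ * only the "args" string ("MY_STOP_ROW") is handed to this factory.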
+ */ +public class StopRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new StopRowFilter(Bytes.toBytes(args)); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java new file mode 100644 index 000000000000..bdb2a255916c --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java @@ -0,0 +1,61 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.WhileMatchRowFilter; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * Factory to produce WhileMatchRowFilters from JSON + * Expects as an arguement a valid JSON Object in + * String form of another RowFilterInterface. + */ +public class WhileMatchRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException { + JSONObject innerFilterJSON; + FilterFactory f; + String innerFilterType; + String innerFilterArgs; + + try { + innerFilterJSON = new JSONObject(args); + } catch (JSONException e) { + throw new HBaseRestException(e); + } + + // Check if filter is correct + if ((innerFilterType = innerFilterJSON.optString(TYPE)) == null) + throw new MalformedFilterException(); + if ((innerFilterArgs = innerFilterJSON.optString(ARGUMENTS)) == null) + throw new MalformedFilterException(); + + if ((f = RESTConstants.filterFactories.get(innerFilterType)) == null) + throw new MalformedFilterException(); + + RowFilterInterface innerFilter = f.getFilterFromJSON(innerFilterArgs); + + return new WhileMatchRowFilter(innerFilter); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java b/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java new file mode 100644 index 000000000000..8247127410ff --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java @@ -0,0 +1,56 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hbase.rest.Dispatcher.ContentType; + +/** + * + */ +public class HBaseRestParserFactory { + + private static final Map<ContentType, Class<?>> parserMap = + new HashMap<ContentType, Class<?>>(); + + static { + parserMap.put(ContentType.XML, XMLRestParser.class); + parserMap.put(ContentType.JSON, JsonRestParser.class); + } + + public static IHBaseRestParser getParser(ContentType ct) { + IHBaseRestParser parser = null; + + Class<?> clazz = parserMap.get(ct); + try { + parser = (IHBaseRestParser) clazz.newInstance(); + } catch (InstantiationException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (IllegalAccessException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + return parser; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java new file mode 100644 index 000000000000..5663a15a21c8 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java @@ -0,0 +1,52 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.util.ArrayList; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + */ +public interface IHBaseRestParser { + /** + * Parses an HTableDescriptor given the input array. 
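+ *
+ * A minimal caller sketch (the requestBody variable is illustrative, not
+ * part of this patch):
+ *
+ *   IHBaseRestParser parser = HBaseRestParserFactory.getParser(ContentType.XML);
+ *   HTableDescriptor desc = parser.getTableDescriptor(requestBody);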
+ * + * @param input + * @return + * @throws HBaseRestException + */ + public HTableDescriptor getTableDescriptor(byte[] input) + throws HBaseRestException; + + public ArrayList getColumnDescriptors(byte[] input) + throws HBaseRestException; + + public ScannerDescriptor getScannerDescriptor(byte[] input) + throws HBaseRestException; + + public RowUpdateDescriptor getRowUpdateDescriptor(byte[] input, + byte[][] pathSegments) throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java new file mode 100644 index 000000000000..e1f1180dee58 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java @@ -0,0 +1,235 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.util.ArrayList; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.util.Bytes; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * + */ +public class JsonRestParser implements IHBaseRestParser { + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getTableDescriptor + * (byte[]) + */ + public HTableDescriptor getTableDescriptor(byte[] input) + throws HBaseRestException { + try { + JSONObject o; + HTableDescriptor h; + JSONArray columnDescriptorArray; + o = new JSONObject(new String(input)); + columnDescriptorArray = o.getJSONArray("column_families"); + h = new HTableDescriptor(o.getString("name")); + + for (int i = 0; i < columnDescriptorArray.length(); i++) { + JSONObject json_columnDescriptor = columnDescriptorArray + .getJSONObject(i); + h.addFamily(this.getColumnDescriptor(json_columnDescriptor)); + } + return h; + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + private HColumnDescriptor getColumnDescriptor(JSONObject jsonObject) + throws JSONException { + String strTemp; + strTemp = jsonObject.getString("name"); + if (strTemp.charAt(strTemp.length() - 1) != ':') { + strTemp += ":"; + } + + byte[] name = Bytes.toBytes(strTemp); + + int maxVersions; + HColumnDescriptor.CompressionType cType; + boolean inMemory; + boolean blockCacheEnabled; + int maxValueLength; + int timeToLive; + boolean bloomfilter; + + try { + bloomfilter = jsonObject.getBoolean("bloomfilter"); + } catch 
(JSONException e) { + bloomfilter = false; + } + + try { + maxVersions = jsonObject.getInt("max_versions"); + } catch (JSONException e) { + maxVersions = 3; + } + + try { + cType = HColumnDescriptor.CompressionType.valueOf(jsonObject + .getString("compression_type")); + } catch (JSONException e) { + cType = HColumnDescriptor.CompressionType.NONE; + } + + try { + inMemory = jsonObject.getBoolean("in_memory"); + } catch (JSONException e) { + inMemory = false; + } + + try { + blockCacheEnabled = jsonObject.getBoolean("block_cache_enabled"); + } catch (JSONException e) { + blockCacheEnabled = false; + } + + try { + maxValueLength = jsonObject.getInt("max_value_length"); + } catch (JSONException e) { + maxValueLength = 2147483647; + } + + try { + timeToLive = jsonObject.getInt("time_to_live"); + } catch (JSONException e) { + timeToLive = Integer.MAX_VALUE; + } + + return new HColumnDescriptor(name, maxVersions, cType, inMemory, + blockCacheEnabled, maxValueLength, timeToLive, bloomfilter); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getColumnDescriptors + * (byte[]) + */ + public ArrayList getColumnDescriptors(byte[] input) + throws HBaseRestException { + ArrayList columns = new ArrayList(); + try { + JSONObject o; + JSONArray columnDescriptorArray; + o = new JSONObject(new String(input)); + columnDescriptorArray = o.getJSONArray("column_families"); + + for (int i = 0; i < columnDescriptorArray.length(); i++) { + JSONObject json_columnDescriptor = columnDescriptorArray + .getJSONObject(i); + columns.add(this.getColumnDescriptor(json_columnDescriptor)); + } + } catch (JSONException e) { + throw new HBaseRestException("Error Parsing json input", e); + } + + return columns; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getScannerDescriptor + * (byte[]) + */ + public ScannerDescriptor getScannerDescriptor(byte[] input) + throws HBaseRestException { + JSONObject scannerDescriptor; + JSONArray columnArray; + + byte[][] columns = null; + long timestamp; + byte[] startRow; + byte[] stopRow; + String filters; + + try { + scannerDescriptor = new JSONObject(new String(input)); + + columnArray = scannerDescriptor.optJSONArray(RESTConstants.COLUMNS); + timestamp = scannerDescriptor.optLong(RESTConstants.SCANNER_TIMESTAMP); + startRow = Bytes.toBytes(scannerDescriptor.optString( + RESTConstants.SCANNER_START_ROW, "")); + stopRow = Bytes.toBytes(scannerDescriptor.optString( + RESTConstants.SCANNER_STOP_ROW, "")); + filters = scannerDescriptor.optString(RESTConstants.SCANNER_FILTER); + + if (columnArray != null) { + columns = new byte[columnArray.length()][]; + for (int i = 0; i < columnArray.length(); i++) { + columns[i] = Bytes.toBytes(columnArray.optString(i)); + } + } + + return new ScannerDescriptor(columns, timestamp, startRow, stopRow, + filters); + } catch (JSONException e) { + throw new HBaseRestException("error parsing json string", e); + } + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getRowUpdateDescriptor + * (byte[], byte[][]) + */ + public RowUpdateDescriptor getRowUpdateDescriptor(byte[] input, + byte[][] pathSegments) throws HBaseRestException { + + RowUpdateDescriptor rud = new RowUpdateDescriptor(); + JSONArray a; + + rud.setTableName(Bytes.toString(pathSegments[0])); + rud.setRowName(Bytes.toString(pathSegments[2])); + + try { + JSONObject updateObject = new JSONObject(new String(input)); + a = 
updateObject.getJSONArray(RESTConstants.COLUMNS); + for (int i = 0; i < a.length(); i++) { + rud.getColVals().put( + Bytes.toBytes(a.getJSONObject(i).getString(RESTConstants.NAME)), + org.apache.hadoop.hbase.util.Base64.decode(a.getJSONObject(i) + .getString(RESTConstants.VALUE))); + } + } catch (JSONException e) { + throw new HBaseRestException("Error parsing row update json", e); + } + return rud; + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java new file mode 100644 index 000000000000..a8037e85b5fd --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java @@ -0,0 +1,291 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.io.ByteArrayInputStream; +import java.util.ArrayList; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.thrift.generated.Hbase; +import org.apache.hadoop.hbase.util.Bytes; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +/** + * + */ +public class XMLRestParser implements IHBaseRestParser { + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getTableDescriptor + * (byte[]) + */ + public HTableDescriptor getTableDescriptor(byte[] input) + throws HBaseRestException { + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory + .newInstance(); + docBuilderFactory.setIgnoringComments(true); + + DocumentBuilder builder = null; + Document doc = null; + HTableDescriptor htd = null; + + try { + builder = docBuilderFactory.newDocumentBuilder(); + ByteArrayInputStream is = new ByteArrayInputStream(input); + doc = builder.parse(is); + } catch (Exception e) { + throw new HBaseRestException(e); + } + + try { + Node name_node = doc.getElementsByTagName("name").item(0); + String table_name = name_node.getFirstChild().getNodeValue(); + + htd = new HTableDescriptor(table_name); + NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); + for (int i = 0; i < columnfamily_nodes.getLength(); i++) { + Element columnfamily = (Element) columnfamily_nodes.item(i); + 
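+        // The table name was read from the first <name> element above; each
+        // <columnfamily> element is handed to getColumnDescriptor below, so a
+        // minimal accepted body (values illustrative) looks like:
+        //   <table><name>t1</name><columnfamily><name>cf</name></columnfamily></table>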
htd.addFamily(this.getColumnDescriptor(columnfamily)); + } + } catch (Exception e) { + throw new HBaseRestException(e); + } + return htd; + } + + public HColumnDescriptor getColumnDescriptor(Element columnfamily) { + return this.getColumnDescriptor(columnfamily, null); + } + + private HColumnDescriptor getColumnDescriptor(Element columnfamily, + HTableDescriptor currentTDesp) { + Node name_node = columnfamily.getElementsByTagName("name").item(0); + String colname = makeColumnName(name_node.getFirstChild().getNodeValue()); + + int max_versions = HColumnDescriptor.DEFAULT_VERSIONS; + CompressionType compression = HColumnDescriptor.DEFAULT_COMPRESSION; + boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY; + boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE; + int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH; + int ttl = HColumnDescriptor.DEFAULT_TTL; + boolean bloomfilter = HColumnDescriptor.DEFAULT_BLOOMFILTER; + + if (currentTDesp != null) { + HColumnDescriptor currentCDesp = currentTDesp.getFamily(Bytes + .toBytes(colname)); + if (currentCDesp != null) { + max_versions = currentCDesp.getMaxVersions(); + // compression = currentCDesp.getCompression(); + in_memory = currentCDesp.isInMemory(); + block_cache = currentCDesp.isBlockCacheEnabled(); + max_cell_size = currentCDesp.getMaxValueLength(); + ttl = currentCDesp.getTimeToLive(); + bloomfilter = currentCDesp.isBloomfilter(); + } + } + + NodeList max_versions_list = columnfamily + .getElementsByTagName("max-versions"); + if (max_versions_list.getLength() > 0) { + max_versions = Integer.parseInt(max_versions_list.item(0).getFirstChild() + .getNodeValue()); + } + + NodeList compression_list = columnfamily + .getElementsByTagName("compression"); + if (compression_list.getLength() > 0) { + compression = CompressionType.valueOf(compression_list.item(0) + .getFirstChild().getNodeValue()); + } + + NodeList in_memory_list = columnfamily.getElementsByTagName("in-memory"); + if (in_memory_list.getLength() > 0) { + in_memory = Boolean.valueOf(in_memory_list.item(0).getFirstChild() + .getNodeValue()); + } + + NodeList block_cache_list = columnfamily + .getElementsByTagName("block-cache"); + if (block_cache_list.getLength() > 0) { + block_cache = Boolean.valueOf(block_cache_list.item(0).getFirstChild() + .getNodeValue()); + } + + NodeList max_cell_size_list = columnfamily + .getElementsByTagName("max-cell-size"); + if (max_cell_size_list.getLength() > 0) { + max_cell_size = Integer.valueOf(max_cell_size_list.item(0) + .getFirstChild().getNodeValue()); + } + + NodeList ttl_list = columnfamily.getElementsByTagName("time-to-live"); + if (ttl_list.getLength() > 0) { + ttl = Integer.valueOf(ttl_list.item(0).getFirstChild().getNodeValue()); + } + + NodeList bloomfilter_list = columnfamily + .getElementsByTagName("bloomfilter"); + if (bloomfilter_list.getLength() > 0) { + bloomfilter = Boolean.valueOf(bloomfilter_list.item(0).getFirstChild() + .getNodeValue()); + } + + HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(colname), + max_versions, compression, in_memory, block_cache, max_cell_size, ttl, + bloomfilter); + + NodeList metadataList = columnfamily.getElementsByTagName("metadata"); + for (int i = 0; i < metadataList.getLength(); i++) { + Element metadataColumn = (Element) metadataList.item(i); + // extract the name and value children + Node mname_node = metadataColumn.getElementsByTagName("name").item(0); + String mname = mname_node.getFirstChild().getNodeValue(); + Node mvalue_node = 
metadataColumn.getElementsByTagName("value").item(0); + String mvalue = mvalue_node.getFirstChild().getNodeValue(); + hcd.setValue(mname, mvalue); + } + + return hcd; + } + + protected String makeColumnName(String column) { + String returnColumn = column; + if (column.indexOf(':') == -1) + returnColumn += ':'; + return returnColumn; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getColumnDescriptors + * (byte[]) + */ + public ArrayList getColumnDescriptors(byte[] input) + throws HBaseRestException { + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory + .newInstance(); + docBuilderFactory.setIgnoringComments(true); + + DocumentBuilder builder = null; + Document doc = null; + ArrayList columns = new ArrayList(); + + try { + builder = docBuilderFactory.newDocumentBuilder(); + ByteArrayInputStream is = new ByteArrayInputStream(input); + doc = builder.parse(is); + } catch (Exception e) { + throw new HBaseRestException(e); + } + + NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); + for (int i = 0; i < columnfamily_nodes.getLength(); i++) { + Element columnfamily = (Element) columnfamily_nodes.item(i); + columns.add(this.getColumnDescriptor(columnfamily)); + } + + return columns; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getScannerDescriptor + * (byte[]) + */ + public ScannerDescriptor getScannerDescriptor(byte[] input) + throws HBaseRestException { + // TODO Auto-generated method stub + return null; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getRowUpdateDescriptor + * (byte[], byte[][]) + */ + public RowUpdateDescriptor getRowUpdateDescriptor(byte[] input, + byte[][] pathSegments) throws HBaseRestException { + RowUpdateDescriptor rud = new RowUpdateDescriptor(); + + rud.setTableName(Bytes.toString(pathSegments[0])); + rud.setRowName(Bytes.toString(pathSegments[2])); + + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory + .newInstance(); + docBuilderFactory.setIgnoringComments(true); + + DocumentBuilder builder = null; + Document doc = null; + + try { + builder = docBuilderFactory.newDocumentBuilder(); + ByteArrayInputStream is = new ByteArrayInputStream(input); + doc = builder.parse(is); + } catch (Exception e) { + throw new HBaseRestException(e.getMessage(), e); + } + + NodeList cell_nodes = doc.getElementsByTagName(RESTConstants.COLUMN); + System.out.println("cell_nodes.length: " + cell_nodes.getLength()); + for (int i = 0; i < cell_nodes.getLength(); i++) { + String columnName = null; + byte[] value = null; + + Element cell = (Element) cell_nodes.item(i); + + NodeList item = cell.getElementsByTagName(RESTConstants.NAME); + if (item.getLength() > 0) { + columnName = item.item(0).getFirstChild().getNodeValue(); + } + + NodeList item1 = cell.getElementsByTagName(RESTConstants.VALUE); + if (item1.getLength() > 0) { + value = org.apache.hadoop.hbase.util.Base64.decode(item1 + .item(0).getFirstChild().getNodeValue()); + } + + if (columnName != null && value != null) { + rud.getColVals().put(columnName.getBytes(), value); + } + } + return rud; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java new file mode 100644 index 000000000000..c055e7fe2c00 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java @@ -0,0 +1,58 @@ 
+/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import javax.servlet.http.HttpServletResponse; + +/** + * + * Abstract object that is used as the base of all serializers in the + * REST based interface. + */ +public abstract class AbstractRestSerializer implements IRestSerializer { + + // keep the response object to write back to the stream + protected final HttpServletResponse response; + // Used to denote if pretty printing of the output should be used + protected final boolean prettyPrint; + + /** + * marking the default constructor as private so it will never be used. + */ + @SuppressWarnings("unused") + private AbstractRestSerializer() { + response = null; + prettyPrint = false; + } + + /** + * Public constructor for AbstractRestSerializer. This is the constructor that + * should be called whenever creating a RestSerializer object. + * + * @param response + */ + public AbstractRestSerializer(HttpServletResponse response, + boolean prettyPrint) { + super(); + this.response = response; + this.prettyPrint = prettyPrint; + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java new file mode 100644 index 000000000000..e91db354d1ff --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java @@ -0,0 +1,173 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.serializer; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata; +import org.apache.hadoop.hbase.rest.Status.StatusMessage; +import org.apache.hadoop.hbase.rest.TableModel.Regions; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + * Interface that is implemented to return serialized objects back to + * the output stream. + */ +public interface IRestSerializer { + /** + * Serializes an object into the appropriate format and writes it to the + * output stream. + * + * This is the main point of entry when for an object to be serialized to the + * output stream. + * + * @param o + * @throws HBaseRestException + */ + public void writeOutput(Object o) throws HBaseRestException; + + /** + * serialize the database metadata + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param databaseMetadata + * @throws HBaseRestException + */ + public void serializeDatabaseMetadata(DatabaseMetadata databaseMetadata) + throws HBaseRestException; + + /** + * serialize the HTableDescriptor object + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param tableDescriptor + * @throws HBaseRestException + */ + public void serializeTableDescriptor(HTableDescriptor tableDescriptor) + throws HBaseRestException; + + /** + * serialize an HColumnDescriptor to the output stream. 
+ * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param column + * @throws HBaseRestException + */ + public void serializeColumnDescriptor(HColumnDescriptor column) + throws HBaseRestException; + + /** + * serialize the region data for a table to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param regions + * @throws HBaseRestException + */ + public void serializeRegionData(Regions regions) throws HBaseRestException; + + /** + * serialize the status message object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param message + * @throws HBaseRestException + */ + public void serializeStatusMessage(StatusMessage message) + throws HBaseRestException; + + /** + * serialize the ScannerIdentifier object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param scannerIdentifier + * @throws HBaseRestException + */ + public void serializeScannerIdentifier(ScannerIdentifier scannerIdentifier) + throws HBaseRestException; + + /** + * serialize a RowResult object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param rowResult + * @throws HBaseRestException + */ + public void serializeRowResult(RowResult rowResult) throws HBaseRestException; + + /** + * serialize a RowResult array to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param rows + * @throws HBaseRestException + */ + public void serializeRowResultArray(RowResult[] rows) + throws HBaseRestException; + + /** + * serialize a cell object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param cell + * @throws HBaseRestException + */ + public void serializeCell(Cell cell) throws HBaseRestException; + + /** + * serialize a Cell array to the output stream + * + * @param cells + * @throws HBaseRestException + */ + public void serializeCellArray(Cell[] cells) throws HBaseRestException; + + + /** + * serialize a description of the timestamps available for a row + * to the output stream. + * + * @param timestampsDescriptor + * @throws HBaseRestException + */ + public void serializeTimestamps(TimestampsDescriptor timestampsDescriptor) throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java b/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java new file mode 100644 index 000000000000..d4828543bbc9 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java @@ -0,0 +1,42 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + * Interface for objects that wish to write back to the REST based + * interface output stream. Objects should implement this interface, + * then use the IRestSerializer passed to it to call the appropriate + * serialization method. + */ +public interface ISerializable { + /** + * visitor pattern method where the object implementing this interface will + * call back on the IRestSerializer with the correct method to run to + * serialize the output of the object to the stream. + * + * @param serializer + * @throws HBaseRestException + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java new file mode 100644 index 000000000000..d54df8d44094 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java @@ -0,0 +1,213 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata; +import org.apache.hadoop.hbase.rest.Status.StatusMessage; +import org.apache.hadoop.hbase.rest.TableModel.Regions; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +import agilejson.JSON; + +/** + * + * Serializes objects into JSON strings and prints them back out on the output + * stream. It should be noted that this JSON implementation uses annotations on + * the objects to be serialized. + * + * Since these annotations are used to describe the serialization of the objects + * the only method that is implemented is writeOutput(Object o). The other + * methods in the interface do not need to be implemented. 
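+ *
+ * A typical handler therefore only needs something like the following
+ * (sketch; the result variable is illustrative):
+ *
+ *   IRestSerializer serializer =
+ *       RestSerializerFactory.getSerializer(request, response);
+ *   serializer.writeOutput(result);  // @TOJSON-annotated getters drive the JSON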
+ */ +public class JSONSerializer extends AbstractRestSerializer { + + /** + * @param response + */ + public JSONSerializer(HttpServletResponse response) { + super(response, false); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#writeOutput(java + * .lang.Object, javax.servlet.http.HttpServletResponse) + */ + public void writeOutput(Object o) throws HBaseRestException { + response.setContentType("application/json"); + + try { + // LOG.debug("At top of send data"); + String data = JSON.toJSON(o); + response.setContentLength(data.length()); + response.getWriter().println(data); + } catch (Exception e) { + // LOG.debug("Error sending data: " + e.toString()); + throw new HBaseRestException(e); + } + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) + */ + public void serializeColumnDescriptor(HColumnDescriptor column) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeDatabaseMetadata + * (org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata) + */ + public void serializeDatabaseMetadata(DatabaseMetadata databaseMetadata) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRegionData + * (org.apache.hadoop.hbase.rest.TableModel.Regions) + */ + public void serializeRegionData(Regions regions) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) + */ + public void serializeTableDescriptor(HTableDescriptor tableDescriptor) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeStatusMessage + * (org.apache.hadoop.hbase.rest.Status.StatusMessage) + */ + public void serializeStatusMessage(StatusMessage message) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeScannerIdentifier(org.apache.hadoop.hbase.rest.ScannerIdentifier) + */ + public void serializeScannerIdentifier(ScannerIdentifier scannerIdentifier) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResult + * (org.apache.hadoop.hbase.io.RowResult) + */ + public void serializeRowResult(RowResult rowResult) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResultArray + * (org.apache.hadoop.hbase.io.RowResult[]) + */ + public void serializeRowResultArray(RowResult[] rows) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCell(org + * .apache.hadoop.hbase.io.Cell) 
+ */ + public void serializeCell(Cell cell) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCellArray + * (org.apache.hadoop.hbase.io.Cell[]) + */ + public void serializeCellArray(Cell[] cells) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeTimestamps + * (org.apache.hadoop.hbase.rest.RowModel.TimestampsDescriptor) + */ + public void serializeTimestamps(TimestampsDescriptor timestampsDescriptor) + throws HBaseRestException { + // No implementation needed for the JSON serializer + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java b/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java new file mode 100644 index 000000000000..9284da0de8c6 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java @@ -0,0 +1,56 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.rest.Dispatcher.ContentType; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + * Factory used to return a Rest Serializer tailored to the HTTP + * Requesters accept type in the header. + * + */ +public class RestSerializerFactory { + + public static AbstractRestSerializer getSerializer( + HttpServletRequest request, HttpServletResponse response) + throws HBaseRestException { + ContentType ct = ContentType.getContentType(request.getHeader("accept")); + AbstractRestSerializer serializer = null; + + // TODO refactor this so it uses reflection to create the new objects. + switch (ct) { + case XML: + serializer = new SimpleXMLSerializer(response); + break; + case JSON: + serializer = new JSONSerializer(response); + break; + default: + serializer = new SimpleXMLSerializer(response); + break; + } + return serializer; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java new file mode 100644 index 000000000000..12b30a841857 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java @@ -0,0 +1,464 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import java.io.IOException; +import java.io.PrintWriter; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata; +import org.apache.hadoop.hbase.rest.Status.StatusMessage; +import org.apache.hadoop.hbase.rest.TableModel.Regions; +import org.apache.hadoop.hbase.rest.descriptors.RestCell; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * + * Basic first pass at implementing an XML serializer for the REST interface. + * This should probably be refactored into something better. + * + */ +public class SimpleXMLSerializer extends AbstractRestSerializer { + + private final AbstractPrinter printer; + + /** + * @param response + * @throws HBaseRestException + */ + @SuppressWarnings("synthetic-access") + public SimpleXMLSerializer(HttpServletResponse response) + throws HBaseRestException { + super(response, false); + printer = new SimplePrinter(response); + } + + @SuppressWarnings("synthetic-access") + public SimpleXMLSerializer(HttpServletResponse response, boolean prettyPrint) + throws HBaseRestException { + super(response, prettyPrint); + if (prettyPrint) { + printer = new PrettyPrinter(response); + } else { + printer = new SimplePrinter(response); + } + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#writeOutput(java + * .lang.Object, java.io.OutputStream) + */ + public void writeOutput(Object o) throws HBaseRestException { + response.setContentType("text/xml"); + response.setCharacterEncoding(HConstants.UTF8_ENCODING); + + if (o instanceof ISerializable) { + ((ISerializable) o).restSerialize(this); + } else if (o.getClass().isArray() + && o.getClass().getComponentType() == RowResult.class) { + this.serializeRowResultArray((RowResult[]) o); + } else if (o.getClass().isArray() + && o.getClass().getComponentType() == Cell.class) { + this.serializeCellArray((Cell[]) o); + } else { + throw new HBaseRestException( + "Object does not conform to the ISerializable " + + "interface. 
Unable to generate xml output."); + } + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeDatabaseMetadata + * (org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata) + */ + public void serializeDatabaseMetadata(DatabaseMetadata databaseMetadata) + throws HBaseRestException { + printer.print(""); + for (HTableDescriptor table : databaseMetadata.getTables()) { + table.restSerialize(this); + } + printer.print(""); + printer.flush(); + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) + */ + public void serializeTableDescriptor(HTableDescriptor tableDescriptor) + throws HBaseRestException { + printer.print(""); + // name element + printer.print(""); + printer.print(tableDescriptor.getNameAsString()); + printer.print(""); + // column families + printer.print(""); + for (HColumnDescriptor column : tableDescriptor.getColumnFamilies()) { + column.restSerialize(this); + } + printer.print(""); + printer.print("
"); + printer.flush(); + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) + */ + public void serializeColumnDescriptor(HColumnDescriptor column) + throws HBaseRestException { + + printer.print(""); + // name + printer.print(""); + printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(column.getName())); + printer.print(""); + // compression + printer.print(""); + printer.print(column.getCompression().toString()); + printer.print(""); + // bloomfilter + printer.print(""); + printer.print(column.getCompressionType().toString()); + printer.print(""); + // max-versions + printer.print(""); + printer.print(column.getMaxVersions()); + printer.print(""); + // max-length + printer.print(""); + printer.print(column.getMaxValueLength()); + printer.print(""); + printer.print(""); + printer.flush(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRegionData + * (org.apache.hadoop.hbase.rest.TableModel.Regions) + */ + public void serializeRegionData(Regions regions) throws HBaseRestException { + + printer.print(""); + for (byte[] region : regions.getRegionKey()) { + printer.print(""); + printer.print(Bytes.toString(region)); + printer.print(""); + } + printer.print(""); + printer.flush(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeStatusMessage + * (org.apache.hadoop.hbase.rest.Status.StatusMessage) + */ + public void serializeStatusMessage(StatusMessage message) + throws HBaseRestException { + + printer.print(""); + printer.print(""); + printer.print(message.getStatusCode()); + printer.print(""); + printer.print(""); + printer.print(message.getMessage().toString()); + printer.print(""); + printer.print(""); + printer.print(message.getError()); + printer.print(""); + printer.print(""); + printer.flush(); + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeScannerIdentifier(org.apache.hadoop.hbase.rest.ScannerIdentifier) + */ + public void serializeScannerIdentifier(ScannerIdentifier scannerIdentifier) + throws HBaseRestException { + + printer.print(""); + printer.print(""); + printer.print(scannerIdentifier.getId()); + printer.print(""); + printer.print(""); + printer.flush(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResult + * (org.apache.hadoop.hbase.io.RowResult) + */ + public void serializeRowResult(RowResult rowResult) throws HBaseRestException { + + printer.print(""); + printer.print(""); + printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(rowResult + .getRow())); + printer.print(""); + printer.print(""); + for (RestCell cell : rowResult.getCells()) { + printer.print(""); + printer.print(""); + printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(cell + .getName())); + printer.print(""); + printer.print(""); + printer.print(cell.getTimestamp()); + printer.print(""); + printer.print(""); + printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(cell + .getValue())); + printer.print(""); + printer.print(""); + printer.flush(); + } + printer.print(""); + printer.print(""); + printer.flush(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResultArray + * (org.apache.hadoop.hbase.io.RowResult[]) + */ + public void 
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResultArray
+   * (org.apache.hadoop.hbase.io.RowResult[])
+   */
+  public void serializeRowResultArray(RowResult[] rows)
+      throws HBaseRestException {
+    printer.print("<rows>");
+    for (RowResult row : rows) {
+      row.restSerialize(this);
+    }
+    printer.print("</rows>");
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCell(org
+   * .apache.hadoop.hbase.io.Cell)
+   */
+  public void serializeCell(Cell cell) throws HBaseRestException {
+    printer.print("<cell>");
+    printer.print("<value>");
+    printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(cell
+        .getValue()));
+    printer.print("</value>");
+    printer.print("<timestamp>");
+    printer.print(cell.getTimestamp());
+    printer.print("</timestamp>");
+    printer.print("</cell>");
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCellArray
+   * (org.apache.hadoop.hbase.io.Cell[])
+   */
+  public void serializeCellArray(Cell[] cells) throws HBaseRestException {
+    printer.print("<cells>");
+    for (Cell cell : cells) {
+      cell.restSerialize(this);
+    }
+    printer.print("</cells>");
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeTimestamps
+   * (org.apache.hadoop.hbase.rest.RowModel.TimestampsDescriptor)
+   */
+  public void serializeTimestamps(TimestampsDescriptor timestampsDescriptor)
+      throws HBaseRestException {
+    // TODO Auto-generated method stub
+
+  }
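The array methods above do not walk the objects themselves; each element is handed back through ISerializable.restSerialize(), which calls the matching type-specific method on the serializer (double dispatch). The Cell side of that contract is changed in an earlier hunk of this patch and is not shown here; the following is only a presumed sketch of what that callback looks like.

    // Presumed shape of the ISerializable callback on Cell (illustrative only;
    // the actual change is in the Cell.java hunk earlier in this patch).
    public void restSerialize(IRestSerializer serializer) throws HBaseRestException {
      // Double dispatch: the object selects the serializer method for its own type.
      serializer.serializeCell(this);
    }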
+
+  // Private classes used for printing the output
+
+  private interface IPrinter {
+    public void print(String output);
+
+    public void print(int output);
+
+    public void print(long output);
+
+    public void print(boolean output);
+
+    public void flush();
+  }
+
+  private abstract class AbstractPrinter implements IPrinter {
+    protected final PrintWriter writer;
+
+    @SuppressWarnings("unused")
+    private AbstractPrinter() {
+      writer = null;
+    }
+
+    public AbstractPrinter(HttpServletResponse response)
+        throws HBaseRestException {
+      try {
+        writer = response.getWriter();
+      } catch (IOException e) {
+        throw new HBaseRestException(e.getMessage(), e);
+      }
+    }
+
+    public void flush() {
+      writer.flush();
+    }
+  }
+
+  private class SimplePrinter extends AbstractPrinter {
+    private SimplePrinter(HttpServletResponse response)
+        throws HBaseRestException {
+      super(response);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.Printer#print
+     * (java.io.PrintWriter, java.lang.String)
+     */
+    public void print(final String output) {
+      writer.print(output);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+     * print(int)
+     */
+    public void print(int output) {
+      writer.print(output);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+     * print(long)
+     */
+    public void print(long output) {
+      writer.print(output);
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#print(boolean)
+     */
+    public void print(boolean output) {
+      writer.print(output);
+    }
+  }
+
+  private class PrettyPrinter extends AbstractPrinter {
+    private PrettyPrinter(HttpServletResponse response)
+        throws HBaseRestException {
+      super(response);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.Printer#print
+     * (java.io.PrintWriter, java.lang.String)
+     */
+    public void print(String output) {
+      writer.println(output);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+     * print(int)
+     */
+    public void print(int output) {
+      writer.println(output);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+     * print(long)
+     */
+    public void print(long output) {
+      writer.println(output);
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#print(boolean)
+     */
+    public void print(boolean output) {
+      writer.println(output);
+    }
+  }
+}
diff --git a/src/webapps/rest/WEB-INF/web.xml b/src/webapps/rest/WEB-INF/web.xml
index f9db246ff5c3..01aa3b73c428 100644
--- a/src/webapps/rest/WEB-INF/web.xml
+++ b/src/webapps/rest/WEB-INF/web.xml
@@ -1,14 +1,14 @@
-  rest
+  jsonrest
-  Hbase REST Interface
+  Hbase JSONREST Interface
   api
   api
   org.apache.hadoop.hbase.rest.Dispatcher
   api
-  /*
+  /api/*
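Taken together, a handler only needs RestSerializerFactory to honour the client's Accept header; the serializer it returns owns the wire format from there. The sketch below assumes the surrounding servlet wiring (SerializerUsageSketch and writeTable are illustrative names, not part of the patch); only getSerializer() and writeOutput() come from the code shown above, and AbstractRestSerializer is assumed to expose IRestSerializer's writeOutput().

    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
    import org.apache.hadoop.hbase.rest.serializer.AbstractRestSerializer;
    import org.apache.hadoop.hbase.rest.serializer.RestSerializerFactory;

    public class SerializerUsageSketch {
      // Writes a table descriptor as XML or JSON, depending on the request's Accept header.
      public static void writeTable(HttpServletRequest request,
          HttpServletResponse response, HTableDescriptor table)
          throws HBaseRestException {
        // Factory inspects the "accept" header and picks SimpleXMLSerializer or JSONSerializer.
        AbstractRestSerializer serializer =
            RestSerializerFactory.getSerializer(request, response);
        // The serializer sets the content type and streams the body to the response.
        serializer.writeOutput(table);
      }
    }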