
How to scan a subset of rows in HBase from Eclipse

I have a large table in HBase named Hweather. The table has several column families, like this: Hweather (Province, City, Station, Instant).
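For context, the layout of these families corresponds roughly to a table created like this. This is only an illustrative sketch; the real table already exists, and the class name CreateHweatherSketch is made up:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateHweatherSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);

        // One table, four column families, as described above
        HTableDescriptor desc = new HTableDescriptor("Hweather");
        desc.addFamily(new HColumnDescriptor("Province"));
        desc.addFamily(new HColumnDescriptor("City"));
        desc.addFamily(new HColumnDescriptor("Station"));
        desc.addFamily(new HColumnDescriptor("Instant"));

        admin.createTable(desc);
        admin.close();
    }
}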

Each of these column families has its own column qualifiers. I want to select or scan two columns (Date, Temperature) from the Instant column family, starting where the row key prefix is 16,20,... and stopping the scan once the row key prefix reaches 17,20.

In other words, the start row is 16,20,... and the stop row is 17,20,...

I tried to do this in Java, but I did not get any results. Can you help me fix it so this scan works?

The code is below:

package scan;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class scan2 {

    public static void main(String[] args) throws IOException {

        Configuration config = HBaseConfiguration.create();
        config.set("hbase.zookeeper.quorum", "localhost");
        HTable table = new HTable(config, "Hweather");

        // Filters I hoped would match the Date and Temperature qualifiers
        Filter colfilter = new PrefixFilter(Bytes.toBytes("Date"));
        Filter colfilter1 = new PrefixFilter(Bytes.toBytes("Temperature"));

        // Scan range: start at row 16,20,100,1 and stop at row 17,20,100,1
        Scan scan1 = new Scan(Bytes.toBytes("16,20,100,1"), Bytes.toBytes("17,20,100,1"));
        Scan scan2 = new Scan(Bytes.toBytes("16,20,100,1"), Bytes.toBytes("17,20,100,1"));
        scan1.setFilter(colfilter);
        scan2.setFilter(colfilter1);

        ResultScanner scanner1 = table.getScanner(scan1);
        ResultScanner scanner2 = table.getScanner(scan2);

        // I expected this to print something for each pair of results, but nothing is printed
        for (Result result1 = scanner1.next(); result1 != null; result1 = scanner1.next()) {
            for (Result result2 = scanner2.next(); result2 != null; result2 = scanner2.next()) {
                System.out.println("scanner1" + "scanner2");
            }
        }
    }
}

It does not work and just shows me:

2013-11-10 16:20:20,959 INFO [main] zookeeper.ZooKeeper  
(Environment.java:logEnv(100)) - Client environment:zookeeper.version=3.4.5-1392090, 
built on 09/30/2012 17:52 GMT 
2013-11-10 16:20:20,963 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:host.name=localhost 
2013-11-10 16:20:20,963 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:java.version=1.6.0_27 
2013-11-10 16:20:20,964 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:java.vendor=Sun Microsystems Inc. 
2013-11-10 16:20:20,964 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:java.home=/usr/lib/jvm/java-6-openjdk-i386/jre 
2013-11-10 16:20:20,969 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:java.class.path=/home/ubuntu 
/workspace/Scan/bin:/usr/local/hadoop/lib/hadoop-yarn-server-web-proxy-2.0.2-alpha.jar: 
/usr/local/hadoop/lib/hbase-protocol-0.95.0-hadoop2.jar:/usr/local/hadoop/lib/hadoop- 
fairscheduler-1.0.4.jar:/usr/local/hadoop/lib/hadoop-minicluster-2.0.2-alpha.jar: 
/usr/local/hadoop/lib/hadoop-yarn-api-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop- 
mapreduce-client-app-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop-auth-2.0.2-alpha.jar: 
/usr/local/hadoop/lib/hadoop-yarn-common-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop- 
mapreduce-client-common-2.0.2-alpha.jar:/usr/local/hadoop/lib/hbase-common-0.95.0- 
hadoop2.jar:/usr/local/hadoop/lib/hbase-it-0.95.0-hadoop2.jar:/usr/local/hadoop 
/lib/hbase-client-0.95.0-hadoop2.jar:/usr/local/hadoop/lib/hadoop-hdfs-2.0.2-alpha.jar: 
/usr/local/hadoop/lib/hadoop-thriftfs-1.0.4.jar:/usr/local/hadoop/lib/hbase-hadoop-c 
ompat-0.95.0-hadoop2.jar:/usr/local/hadoop/lib/hadoop-yarn-server-common-2.0.2- 
alpha.jar:/usr/local/hadoop/lib/hadoop-mapreduce-client-hs-2.0.2-alpha.jar:/usr/local 
/hadoop/lib/hadoop-mapreduce-client-core-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop- 
mapreduce-client-jobclient-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop-yarn-server- 
nodemanager-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop-annotations-2.0.2-alpha.jar: 
/usr/local/hadoop/lib/hadoop-common-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop- 
yarn-client-2.0.2-alpha.jar:/usr/local/hadoop/lib/hbase-server-0.95.0-hadoop2.jar: 
/usr/local/hadoop/lib/hadoop-capacity-scheduler-1.0.4.jar:/usr/local/hadoop/lib/hadoop- 
mapreduce-client-shuffle-2.0.2-alpha.jar:/usr/local/hadoop/lib/hadoop-client-2.0.2- 
alpha.jar:/usr/local/hadoop/lib/hbase-prefix-tree-0.95.0-hadoop2.jar:/usr/local/hadoop 
/lib/hbase-hadoop2-compat-0.95.0-hadoop2.jar:/usr/local/hadoop/lib/hadoop-yarn-server- 
resourcemanager-2.0.2-alpha.jar:/usr/local/hadoop/lib/hbase-0.20.0.jar:/usr/local 
/hadoop/lib/activation-1.1.jar:/usr/local/hadoop/lib/aopalliance-1.0.jar:/usr/local 
/hadoop/lib/asm-3.1.jar:/usr/local/hadoop/lib/asm-3.2.jar:/usr/local/hadoop 
......... 
2013-11-10 16:20:20,970 INFO [main] zookeeper.ZooKeeper  
(Environment.java:logEnv(100)) - Client environment:java.compiler=<NA> 
2013-11-10 16:20:20,971 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:os.name=Linux 

2013-11-10 16:20:20,971 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:os.arch=i386 
2013-11-10 16:20:20,972 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:os.version=3.2.0-23-generic-pae 
2013-11-10 16:20:20,972 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:user.name=ubuntu 
2013-11-10 16:20:20,972 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:user.home=/home/ubuntu 
2013-11-10 16:20:20,973 INFO [main] zookeeper.ZooKeeper 
(Environment.java:logEnv(100)) - Client environment:user.dir=/home/ubuntu/workspace 
/Scan 

2013-11-10 16:20:20,974 INFO [main] zookeeper.ZooKeeper (ZooKeeper.java:<init>(438)) - 
    Initiating client connection, connectString=localhost:2181 sessionTimeout=180000  
watcher=hconnection-0x16546ef 
2013-11-10 16:20:21,002 INFO [main-SendThread(localhost:2181)] zookeeper.ClientCnxn 
(ClientCnxn.java:logStartConnect(966)) - Opening socket connection to server 
localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) 
2013-11-10 16:20:21,004 INFO [main] zookeeper.RecoverableZooKeeper 
(RecoverableZooKeeper.java:<init>(119)) - The identifier of this process is 
    hconnection-0x16546ef 
2013-11-10 16:20:21,017 INFO [main-SendThread(localhost:2181)] zookeeper.ClientCnxn 
(ClientCnxn.java:primeConnection(849)) - Socket connection established to 
localhost/127.0.0.1:2181, initiating session 
2013-11-10 16:20:21,038 INFO [main-SendThread(localhost:2181)] zookeeper.ClientCnxn 
(ClientCnxn.java:onConnected(1207)) - Session establishment complete on server 
localhost/127.0.0.1:2181, sessionid = 0x14241e6ac200005, negotiated timeout = 40000 
2013-11-10 16:20:21,116 INFO [main] 
client.HConnectionManager$HConnectionImplementation 
(HConnectionManager.java:retrieveClusterId(680)) - ClusterId is f6e88331-aea1-4bd3- 
    a579-f4a22524d9cf 
2013-11-10 16:20:21,385 WARN [main] conf.Configuration 
    (Configuration.java:warnOnceIfDeprecated(816)) - hadoop.native.lib is deprecated. 
    Instead, use io.native.lib.available 

and it does not give me any output. Can you guide me on how to fix this, or how else I can do it?

Thanks


Use Scan#addColumn instead of filters. – octo

Answer


Try this and see if it helps:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixFilterDemo {

    public static void main(String[] args) throws IOException {

        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "Hweather");

        Scan s = new Scan();
        // Ask only for the two qualifiers you need, from the Instant family
        s.addColumn(Bytes.toBytes("Instant"), Bytes.toBytes("Date"));
        s.addColumn(Bytes.toBytes("Instant"), Bytes.toBytes("Temperature"));

        // PrefixFilter matches row keys, so this keeps only rows whose key starts with "16,20"
        Filter f = new PrefixFilter(Bytes.toBytes("16,20"));
        s.setFilter(f);

        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("ROW : " + Bytes.toString(r.getRow()));
        }
        rs.close();
        table.close();
    }
}
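If you also need the scan to stop once the row keys reach the 17,20 prefix, as asked in the question, you can give the Scan explicit start and stop rows instead of relying only on the PrefixFilter. The following is only a sketch, assuming the row keys are plain comma-separated strings like 16,20,100,1; the class name StartStopRowDemo is just for illustration:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class StartStopRowDemo {

    public static void main(String[] args) throws IOException {

        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "Hweather");

        // Scan [startRow, stopRow): from rows starting at "16,20" up to (not including) "17,20"
        Scan s = new Scan(Bytes.toBytes("16,20"), Bytes.toBytes("17,20"));
        s.addColumn(Bytes.toBytes("Instant"), Bytes.toBytes("Date"));
        s.addColumn(Bytes.toBytes("Instant"), Bytes.toBytes("Temperature"));

        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            // getValue returns null if the cell is missing in this row
            byte[] date = r.getValue(Bytes.toBytes("Instant"), Bytes.toBytes("Date"));
            byte[] temp = r.getValue(Bytes.toBytes("Instant"), Bytes.toBytes("Temperature"));
            System.out.println(Bytes.toString(r.getRow()) + " : "
                    + Bytes.toString(date) + " / " + Bytes.toString(temp));
        }
        rs.close();
        table.close();
    }
}

The stop row is exclusive, so rows whose keys begin with 17,20 are not returned, which matches the range used in the question's own Scan constructor.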