I am trying to install SAP HANA Vora on HDP 2.3 using Ambari 2.2 on a 5-node cluster consisting of 1 management node, 1 master node, 2 worker nodes and 1 jump box node.
After installing all the Vora services, I was trying to validate my installation.
As per the SAP HANA Vora documentation, I need to create a Vora table (a table using com.sap.spark.vora) from Spark SQL, but I am getting an exception. The stack trace is below.
scala> import org.apache.spark.sql.SapSQLContext
import org.apache.spark.sql.SapSQLContext
scala> val vc = new SapSQLContext(sc)
16/05/04 06:54:32 INFO SapSQLContext: SapSQLContext [version: 1.2.33] created
vc: org.apache.spark.sql.SapSQLContext = org.apache.spark.sql.SapSQLContext@69e4e90b
scala> val testsql = """
| CREATE TABLE table001 (a1 double, a2 int, a3 string)
| USING com.sap.spark.vora
| OPTIONS (tablename "table001", paths "/user/vora/test.csv")"""
testsql: String =
"
CREATE TABLE table001 (a1 double, a2 int, a3 string)
USING com.sap.spark.vora
OPTIONS (tablename "table001", paths "/user/vora/test.csv")"
scala> vc.sql(testsql)
OperationException{statusCode=500, statusMessage='Internal Server Error', statusContent='No cluster leader'}
at com.ecwid.consul.v1.health.HealthConsulClient.getHealthServices(HealthConsulClient.java:96)
at com.ecwid.consul.v1.health.HealthConsulClient.getHealthServices(HealthConsulClient.java:80)
at com.ecwid.consul.v1.ConsulClient.getHealthServices(ConsulClient.java:324)
at com.sap.spark.vora.discovery.ConsulDiscoveryClient$ConsulDiscoveryClient.lookupService(ConsulDiscoveryClient.scala:45)
at com.sap.spark.vora.config.VoraConfiguration$.apply(VoraConfiguration.scala:37)
at com.sap.spark.vora.DefaultSource.buildConfiguration(DefaultSource.scala:403)
at com.sap.spark.vora.DefaultSource.createRelation(DefaultSource.scala:149)
at org.apache.spark.sql.execution.datasources.CreateTableUsingTemporaryAwareCommand.resolveDataSource(CreateTableUsingTemporaryAwareCommand.scala:73)
at org.apache.spark.sql.execution.datasources.CreateTableUsingTemporaryAwareCommand.run(CreateTableUsingTemporaryAwareCommand.scala:31)
at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:57)
at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:57)
at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:69)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:140)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:138)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:138)
at org.apache.spark.sql.SQLContext$QueryExecution.toRdd$lzycompute(SQLContext.scala:933)
at org.apache.spark.sql.SQLContext$QueryExecution.toRdd(SQLContext.scala:933)
at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:144)
at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:129)
at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:51)
at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:725)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:27)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:32)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:34)
at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:36)
at $iwC$$iwC$$iwC$$iwC.<init>(<console>:38)
at $iwC$$iwC$$iwC.<init>(<console>:40)
at $iwC$$iwC.<init>(<console>:42)
at $iwC.<init>(<console>:44)
at <init>(<console>:46)
at .<init>(<console>:50)
at .<clinit>(<console>)
at .<init>(<console>:7)
at .<clinit>(<console>)
at $print(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1340)
at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
at org.apache.spark.repl.Main$.main(Main.scala:31)
at org.apache.spark.repl.Main.main(Main.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:685)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:120)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Kindly help me with this issue...
The error message No cluster leader indicates that the Vora Discovery Service is not running correctly. You need a minimum of 3 Discovery Servers on 3 different nodes and Discovery Clients on all other nodes (never run a Client and a Server on the same node; they are mutually exclusive because they both use port 8500). The Discovery Servers elect a leader amongst themselves; according to the message, this step did not happen. The likely cause is that fewer than 3 servers are running correctly.
To solve the issue, run at least 3 Discovery Servers. Given that you have 1 master and 2 workers, you probably want to run a Discovery Server on each of those 3 nodes (and no Discovery Clients on the nodes that run Discovery Servers). The jump box should get a Discovery Client. For troubleshooting, please see the Vora Installation and Administration Guide and the Troubleshooting Blog (section 'How to?' -> 'How to check the status of the Vora discovery service?').
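Since the Discovery Service is backed by Consul on port 8500 (as the ConsulDiscoveryClient in your stack trace shows), a quick way to see whether a leader has been elected is to query Consul's standard HTTP status endpoint from any node that runs a Discovery Server or Client. A minimal sketch from the Scala REPL is below; the hostname is a placeholder, and the same check can be done with curl.
// Check the Consul leader election behind the Vora Discovery Service.
// Assumes a Discovery Server or Client is reachable on the standard Consul port 8500.
import scala.io.Source

val consulHost = "localhost" // placeholder: any node running a Discovery Server or Client
val leader = Source.fromURL(s"http://$consulHost:8500/v1/status/leader").mkString.trim

// A healthy cluster returns the leader's address as a JSON string, e.g. "10.0.0.12:8300";
// an empty string ("") means no leader has been elected yet.
if (leader.isEmpty || leader == "\"\"") println("No cluster leader elected")
else println(s"Current leader: $leader")
If this reports no leader on all three server nodes, restart the Discovery Servers and check their logs before retrying the CREATE TABLE statement.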
Related
I use MinIO as the Hive storage system, and there is no problem when I execute query statements like 'select * from table'.
But when I execute an aggregate query like 'select max(age) from student', I get an error:
java.nio.file.AccessDeniedException: hive: org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException: No AWS Credentials provided by SimpleAWSCredentialsProvider EnvironmentVariableCredentialsProvider InstanceProfileCredentialsProvider : com.amazonaws.SdkClientException: Unable to load credentials from service endpoint
at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:187)
at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:111)
at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$3(Invoker.java:265)
at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:322)
at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:261)
at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:236)
at org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExists(S3AFileSystem.java:375)
at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:311)
at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3303)
at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:124)
at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3352)
at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3320)
at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:479)
at org.apache.hadoop.fs.Path.getFileSystem(Path.java:361)
at org.apache.hadoop.hive.ql.exec.Utilities.isEmptyPath(Utilities.java:2610)
at org.apache.hadoop.hive.ql.exec.Utilities.isEmptyPath(Utilities.java:2606)
at org.apache.hadoop.hive.ql.exec.Utilities$GetInputPathsCallable.call(Utilities.java:3432)
at org.apache.hadoop.hive.ql.exec.Utilities.getInputPaths(Utilities.java:3370)
at org.apache.hadoop.hive.ql.exec.mr.ExecDriver.execute(ExecDriver.java:359)
at org.apache.hadoop.hive.ql.exec.mr.MapRedTask.execute(MapRedTask.java:149)
at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:205)
at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:97)
at org.apache.hadoop.hive.ql.Driver.launchTask(Driver.java:2664)
at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2335)
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2011)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1709)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1703)
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157)
at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:218)
at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:239)
at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:188)
at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:402)
at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:821)
at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:759)
at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:683)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:323)
at org.apache.hadoop.util.RunJar.main(RunJar.java:236)
Caused by: org.apache.hadoop.fs.s3a.auth.NoAuthWithAWSException: No AWS Credentials provided by SimpleAWSCredentialsProvider EnvironmentVariableCredentialsProvider InstanceProfileCredentialsProvider : com.amazonaws.SdkClientException: Unable to load credentials from service endpoint
at org.apache.hadoop.fs.s3a.AWSCredentialProviderList.getCredentials(AWSCredentialProviderList.java:159)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.getCredentialsFromContext(AmazonHttpClient.java:1166)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.runBeforeRequestHandlers(AmazonHttpClient.java:762)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:724)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:717)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4368)
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4315)
at com.amazonaws.services.s3.AmazonS3Client.headBucket(AmazonS3Client.java:1344)
at com.amazonaws.services.s3.AmazonS3Client.doesBucketExist(AmazonS3Client.java:1284)
at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$verifyBucketExists$1(S3AFileSystem.java:376)
at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:109)
... 39 more
Should I add some configuration for my file system?
Yes, we are facing the same issue. You can open a tunnel from your local machine to the Amazon instance to check the access.
An article on Medium describes a solution based on a custom class:
https://medium.com/expedia-group-tech/service-slow-to-retrieve-aws-credentials-ebc02a38e95b
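To answer the configuration question directly: the stack trace shows the S3A connector exhausting its default credential providers, so the MinIO endpoint and keys do need to be supplied. A hedged sketch of the standard fs.s3a.* properties is below, shown from spark-shell for brevity; all values are placeholders, and for the Hive CLI path in the stack trace the same keys would go into core-site.xml instead of being set programmatically.
// Hedged sketch: standard fs.s3a.* settings for MinIO, applied from spark-shell.
// All values are placeholders; for the Hive CLI put the same keys into core-site.xml.
val hadoopConf = sc.hadoopConfiguration
hadoopConf.set("fs.s3a.endpoint", "http://minio-host:9000")   // your MinIO endpoint
hadoopConf.set("fs.s3a.access.key", "MINIO_ACCESS_KEY")
hadoopConf.set("fs.s3a.secret.key", "MINIO_SECRET_KEY")
hadoopConf.set("fs.s3a.path.style.access", "true")            // MinIO expects path-style addressing
hadoopConf.set("fs.s3a.aws.credentials.provider",
  "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider")    // use the static keys above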
I am trying to create a view on Apache Ignite using the following syntax:
emplCache.query(new SqlFieldsQuery(
"CREATE VIEW EmployeeCopy AS (SELECT * FROM Employee);")).getAll();
(Assume that table Employee is available and has data in it).
When this line gets executed, I get the following exception:
javax.cache.CacheException: class org.apache.ignite.IgniteCheckedException: null
at org.apache.ignite.internal.processors.query.GridQueryProcessor.querySqlFields(GridQueryProcessor.java:1823)
at org.apache.ignite.internal.processors.cache.IgniteCacheProxy.query(IgniteCacheProxy.java:795)
at org.apache.ignite.internal.processors.cache.IgniteCacheProxy.query(IgniteCacheProxy.java:765)
at com.demo.ignite.test1.EmployeeQuery2.createCopyTable(EmployeeQuery2.java:71)
at com.demo.ignite.test1.EmployeeQuery2.main(EmployeeQuery2.java:55)
Caused by: class org.apache.ignite.IgniteCheckedException: null
at org.apache.ignite.internal.processors.query.GridQueryProcessor.executeQuery(GridQueryProcessor.java:2316)
at org.apache.ignite.internal.processors.query.GridQueryProcessor.querySqlFields(GridQueryProcessor.java:1820)
... 4 more
Caused by: java.lang.NullPointerException
at org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing.queryDistributedSqlFields(IgniteH2Indexing.java:1343)
at org.apache.ignite.internal.processors.query.GridQueryProcessor$5.applyx(GridQueryProcessor.java:1815)
at org.apache.ignite.internal.processors.query.GridQueryProcessor$5.applyx(GridQueryProcessor.java:1813)
at org.apache.ignite.internal.util.lang.IgniteOutClosureX.apply(IgniteOutClosureX.java:36)
at org.apache.ignite.internal.processors.query.GridQueryProcessor.executeQuery(GridQueryProcessor.java:2293)
... 5 more
I see that the variable "twoStepQry" is null at line 1343 of IgniteH2Indexing.java, but I cannot tell whether I have missed something.
I am using apache-ignite-2.1.0.
Also, if I create a VIEW, how does it work internally? Does it lock those entries in the cache, or does it copy them to some other cache?
Ignite does not support CREATE VIEW for now.
I have created a ticket for this: https://issues.apache.org/jira/browse/IGNITE-5951
The list of supported DDL statements can be found at https://apacheignite.readme.io/docs/distributed-ddl
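As a workaround until views are supported, you can run the SELECT itself as a SqlFieldsQuery wherever you would have queried the view. A minimal sketch is below, written in Scala against the same cache API used in the question (the cache and table names come from the question; the Java calls are identical):
// Workaround sketch: run the SELECT directly instead of creating a view.
// emplCache is the same IgniteCache instance used in the question.
import org.apache.ignite.cache.query.SqlFieldsQuery
import scala.collection.JavaConverters._

val rows = emplCache
  .query(new SqlFieldsQuery("SELECT * FROM Employee"))
  .getAll()
  .asScala

rows.foreach(row => println(row.asScala.mkString(", ")))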
I am new to Kylin. I created a Kylin model and cube by following this URL:
http://kylin.apache.org/
Initially it was successful. Then I created a new cube for the same model, and this time the cube build failed at the third step:
#3 Step Name: Extract Fact Table Distinct Columns
I actually had some duplicated rows, so I deleted those rows in Hive and synced the Kylin tables with the Hive tables. But that did not get the third step to complete. I went through the logs and found the following error:
2016-12-29 11:50:45,421 ERROR [IPC Server handler 18 on 46096] org.apache.hadoop.mapred.TaskAttemptListenerImpl: Task: attempt_1482297779079_0128_m_000000_0 - exited : java.lang.ArrayIndexOutOfBoundsException: -1
at org.apache.kylin.engine.mr.steps.FactDistinctHiveColumnsMapper.putRowKeyToHLL(FactDistinctHiveColumnsMapper.java:179)
at org.apache.kylin.engine.mr.steps.FactDistinctHiveColumnsMapper.map(FactDistinctHiveColumnsMapper.java:155)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1724)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)
2016-12-29 11:50:45,421 INFO [IPC Server handler 18 on 46096] org.apache.hadoop.mapred.TaskAttemptListenerImpl: Diagnostics report from attempt_1482297779079_0128_m_000000_0: Error: java.lang.ArrayIndexOutOfBoundsException: -1
at org.apache.kylin.engine.mr.steps.FactDistinctHiveColumnsMapper.putRowKeyToHLL(FactDistinctHiveColumnsMapper.java:179)
at org.apache.kylin.engine.mr.steps.FactDistinctHiveColumnsMapper.map(FactDistinctHiveColumnsMapper.java:155)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
Can anybody please share an idea of how to solve this? Also, what does cardinality mean in Kylin data sources?
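On the cardinality question: in Kylin's data source view, the cardinality of a column is simply its number of distinct values, which Kylin samples from the Hive table and uses as guidance for cube design (for example, ordering rowkey columns and choosing dimension encodings). If you want to verify a column's cardinality yourself against Hive, a hedged sketch from spark-shell is below; the table and column names are placeholders for your fact table.
// Hedged sketch: check column cardinality (distinct value count) directly in Hive.
import org.apache.spark.sql.hive.HiveContext

val hc = new HiveContext(sc) // Spark 1.x style, matching the era of this setup
hc.sql("SELECT COUNT(DISTINCT my_dimension_column) AS cardinality FROM my_fact_table").show()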
I want to run a Dataflow job to migrate data from google-project-1-table to google-project-2-table (read from one and write to the other). I am getting a permission issue while doing that. I have set "GOOGLE_APPLICATION_CREDENTIALS" to point to my credential file for project-1. In project-2, the following permissions/roles are granted for project-1: 1) service-account (role: Editor), 2) -compute@developer.gserviceaccount.com (role: Editor), 3) @cloudservices.gserviceaccount.com (role: Editor).
Is there anything else I need to do to run the job?
Caused by: com.google.bigtable.repackaged.com.google.cloud.grpc.io.IOExceptionWithStatus: Error in response stream
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.ResultQueueEntry$ExceptionResultQueueEntry.getResponseOrThrow(ResultQueueEntry.java:66)
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.ResponseQueueReader.getNextMergedRow(ResponseQueueReader.java:55)
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.StreamingBigtableResultScanner.next(StreamingBigtableResultScanner.java:42)
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.StreamingBigtableResultScanner.next(StreamingBigtableResultScanner.java:27)
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.ResumingStreamingResultScanner.next(ResumingStreamingResultScanner.java:89)
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.ResumingStreamingResultScanner.next(ResumingStreamingResultScanner.java:45)
at com.google.cloud.bigtable.dataflow.CloudBigtableIO$1.next(CloudBigtableIO.java:221)
at com.google.cloud.bigtable.dataflow.CloudBigtableIO$1.next(CloudBigtableIO.java:216)
at com.google.cloud.bigtable.dataflow.CloudBigtableIO$Reader.advance(CloudBigtableIO.java:775)
at com.google.cloud.bigtable.dataflow.CloudBigtableIO$Reader.start(CloudBigtableIO.java:799)
at com.google.cloud.dataflow.sdk.io.Read$Bounded$1.evaluateReadHelper(Read.java:178)
... 18 more
Caused by: com.google.bigtable.repackaged.io.grpc.StatusRuntimeException: PERMISSION_DENIED: User can't access project: project-2
at com.google.bigtable.repackaged.io.grpc.Status.asRuntimeException(Status.java:431)
at com.google.bigtable.repackaged.com.google.cloud.grpc.scanner.StreamObserverAdapter.onClose(StreamObserverAdapter.java:48)
at com.google.bigtable.repackaged.io.grpc.internal.ClientCallImpl$ClientStreamListenerImpl$3.runInContext(ClientCallImpl.java:462)
at com.google.bigtable.repackaged.io.grpc.internal.ContextRunnable.run(ContextRunnable.java:54)
at com.google.bigtable.repackaged.io.grpc.internal.SerializingExecutor$TaskRunner.run(SerializingExecutor.java:154)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
... 1 more
There are some instructions for this in the section "Accessing Cloud Platform Resources Across Multiple Cloud Platform Projects" of the Dataflow Security and Permissions guide.
Since that guide does not explicitly address Cloud BigTable, I will try to write up the requirements clearly here in terms of your question.
Using fake project id numbers, it seems you have:
A project project-1 with id 12345.
A project project-2 with id 9876.
A Bigtable google-project-1-table in project-1
A Bigtable google-project-2-table in project-2
A Dataflow pipeline that will run in project-1, which you want to:
read from google-project-1-table
write to google-project-2-table
Is that accurate?
Your Dataflow workers that write to Bigtable run as the compute engine service account. That is 12345-compute@developer.gserviceaccount.com. This account will need to be able to access project-2 and write to google-project-2-table.
Your error message implies that the permissions failure occurs at the coarsest granularity - the account cannot access project-2 at all.
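For completeness, a hedged sketch of how the write side of such a pipeline is typically pointed at the table in project-2 with the CloudBigtableIO connector from the stack trace is below. The instance name is a placeholder, the builder method names may differ by connector version (older versions use withZoneId/withClusterId instead of withInstanceId), and none of this replaces granting the compute service account access on project-2. Shown in Scala for consistency with the rest of this page; the Java calls are the same.
// Hedged sketch: cross-project write configuration for CloudBigtableIO.
// Placeholders throughout; granting IAM access on project-2 is still required.
import com.google.cloud.bigtable.dataflow.{CloudBigtableIO, CloudBigtableTableConfiguration}

val sinkConfig = new CloudBigtableTableConfiguration.Builder()
  .withProjectId("project-2")               // project that owns the target table
  .withInstanceId("my-bigtable-instance")   // assumption: instance-based builder method
  .withTableId("google-project-2-table")
  .build()

// ... earlier pipeline steps producing org.apache.hadoop.hbase.client.Mutation values ...
// mutations.apply(CloudBigtableIO.writeToTable(sinkConfig))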
I had an HBase cluster running on Amazon EC2 nodes. I wanted to create a backup of my HBase table, so I came across this tool. I was able to create the backup of table dummy on S3 using the following command:
java com.bizosys.oneline.maintenance.HBaseBackup mode=backup.full backup.folder=s3://mybucket/ tables=dummy
But when I tried to restore the same data into another table (model), it failed with the following:
13/10/24 10:52:52 WARN mapred.FileOutputCommitter: Output path is null in cleanup
13/10/24 10:52:52 WARN mapred.LocalJobRunner: job_local_0002
java.lang.NullPointerException
at org.apache.hadoop.fs.s3.Jets3tFileSystemStore.retrieveBlock(Jets3tFileSystemStore.java:209)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy5.retrieveBlock(Unknown Source)
at org.apache.hadoop.fs.s3.S3InputStream.blockSeekTo(S3InputStream.java:160)
at org.apache.hadoop.fs.s3.S3InputStream.read(S3InputStream.java:119)
at java.io.DataInputStream.readFully(DataInputStream.java:195)
at java.io.DataInputStream.readFully(DataInputStream.java:169)
at org.apache.hadoop.io.SequenceFile$Reader.init(SequenceFile.java:1508)
at org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1486)
at org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1475)
at org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1470)
at org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader.initialize(SequenceFileRecordReader.java:50)
at org.apache.hadoop.mapred.MapTask$NewTrackingRecordReader.initialize(MapTask.java:522)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:763)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:370)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:212)
13/10/24 10:52:53 INFO mapred.JobClient: Job complete: job_local_0002
13/10/24 10:52:53 INFO mapred.JobClient: Counters: 0
Error in Job completetion Params
tablename inputputdir
model s3://mybucket/Wed_Oct_23_19_45_49_IST_2013/model
Access Failure to s3://mybucket/Wed_Oct_23_19_45_49_IST_2013/model , tries=1
The restore command I used was:
java com.bizosys.oneline.maintenance.HBaseBackup mode=restore backup.folder=s3://mybucket/Wed_Oct_23_19_45_49_IST_2013 tables="model"
FYI, please don't suggest that there is an option to install HBase and back it up on EMR. I know that, but for some reason I am not using it.