Gerrit 2.15.1
Something broke in my Gerrit permission settings.
The story: I deleted a project from the Projects list in the Gerrit UI and confirmed that I wanted it removed even if it had open changes. Afterwards I noticed that the project was still present in the git directory on the server, so I force-removed it there as well.
Now, as soon as I open the Gerrit UI it shows "Code Review - Error, 500 Internal Server Error".
I can't view anything under All > Open or All > Merged, nor under My > Changes or My > Watched Changes; every one of those views gives the same error.
In GERRIT_SITE/logs/error.log I get the error shown below about being unable to check permissions.
It seems odd, though, that this would be related to the story above.
Note: I am one of the administrators.
EDIT: My colleague doesn't have the same issue, so something appears to be broken specifically for my profile. Our logins are connected to Active Directory and we use SSH keys for authentication.
[2018-07-03 13:37:08,162] [HTTP-85] ERROR com.google.gerrit.httpd.restapi.RestApiServlet : Error in GET /changes/?q=is:open+is:wip+owner:self&q=is:open+-is:wip+owner:self&q=is:open+((reviewer:self+-owner:self+-is:ignored)+OR+assignee:self)&q=is:closed+(owner:self+OR+reviewer:self+OR+assignee:self)+-age:4w+limit:10&O=881
com.google.gwtorm.server.OrmException: unable to check permissions
at com.google.gerrit.server.query.change.ChangeIsVisibleToPredicate.match(ChangeIsVisibleToPredicate.java:67)
at com.google.gerrit.server.query.change.ChangeIsVisibleToPredicate.match(ChangeIsVisibleToPredicate.java:29)
at com.google.gerrit.index.query.AndSource.match(AndSource.java:147)
at com.google.gerrit.index.query.AndSource.readImpl(AndSource.java:101)
at com.google.gerrit.index.query.AndSource.read(AndSource.java:83)
at com.google.gerrit.index.query.QueryProcessor.query(QueryProcessor.java:238)
at com.google.gerrit.index.query.QueryProcessor.query(QueryProcessor.java:174)
at com.google.gerrit.server.query.change.QueryChanges.query(QueryChanges.java:129)
at com.google.gerrit.server.query.change.QueryChanges.apply(QueryChanges.java:107)
at com.google.gerrit.server.query.change.QueryChanges.apply(QueryChanges.java:38)
at com.google.gerrit.httpd.restapi.RestApiServlet.service(RestApiServlet.java:397)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:729)
at com.google.inject.servlet.ServletDefinition.doServiceImpl(ServletDefinition.java:286)
at com.google.inject.servlet.ServletDefinition.doService(ServletDefinition.java:276)
at com.google.inject.servlet.ServletDefinition.service(ServletDefinition.java:181)
at com.google.inject.servlet.ManagedServletPipeline.service(ManagedServletPipeline.java:91)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:85)
at com.google.gerrit.httpd.raw.StaticModule$PolyGerritFilter.doFilter(StaticModule.java:451)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gerrit.httpd.GetUserFilter.doFilter(GetUserFilter.java:75)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gerrit.httpd.RequireSslFilter.doFilter(RequireSslFilter.java:72)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gerrit.httpd.RunAsFilter.doFilter(RunAsFilter.java:122)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gwtexpui.server.CacheControlFilter.doFilter(CacheControlFilter.java:69)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gerrit.httpd.RequestMetricsFilter.doFilter(RequestMetricsFilter.java:57)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gerrit.httpd.AllRequestFilter$FilterProxy$1.doFilter(AllRequestFilter.java:133)
at com.google.gerrit.httpd.AllRequestFilter$FilterProxy.doFilter(AllRequestFilter.java:135)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.gerrit.httpd.RequestContextFilter.doFilter(RequestContextFilter.java:69)
at com.google.inject.servlet.FilterChainInvocation.doFilter(FilterChainInvocation.java:82)
at com.google.inject.servlet.ManagedFilterPipeline.dispatch(ManagedFilterPipeline.java:120)
at com.google.inject.servlet.GuiceFilter.doFilter(GuiceFilter.java:135)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1759)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:582)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:224)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1180)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:512)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1112)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
at org.eclipse.jetty.server.Server.handle(Server.java:534)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:320)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:251)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:283)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:108)
at org.eclipse.jetty.io.SelectChannelEndPoint$2.run(SelectChannelEndPoint.java:93)
at org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.executeProduceConsume(ExecuteProduceConsume.java:303)
at org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.produceConsume(ExecuteProduceConsume.java:148)
at org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.run(ExecuteProduceConsume.java:136)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:671)
at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:589)
at java.lang.Thread.run(Thread.java:748)
Caused by: com.google.gerrit.server.permissions.PermissionBackendException: not found
at com.google.gerrit.server.permissions.FailedPermissionBackend$FailedChange.test(FailedPermissionBackend.java:170)
at com.google.gerrit.server.permissions.PermissionBackend$ForChange.test(PermissionBackend.java:375)
at com.google.gerrit.server.query.change.ChangeIsVisibleToPredicate.match(ChangeIsVisibleToPredicate.java:65)
... 56 more
EDIT: Since posting this question I messed things up a bit more and tried re-initializing the site instead.
My solution was to rebuild the secondary index and then run the Gerrit initialization command again.
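For reference, a minimal sketch of the commands involved, assuming a standard standalone installation with GERRIT_SITE pointing at the site directory (the exact paths are assumptions). Rebuilding the index drops stale entries that still reference the deleted project, which is likely what was behind the "PermissionBackendException: not found" above.
# stop Gerrit before running an offline reindex
$GERRIT_SITE/bin/gerrit.sh stop
# rebuild the secondary indexes (changes, accounts, groups, projects)
java -jar $GERRIT_SITE/bin/gerrit.war reindex -d $GERRIT_SITE
# re-run site initialization non-interactively, then start Gerrit again
java -jar $GERRIT_SITE/bin/gerrit.war init -d $GERRIT_SITE --batch
$GERRIT_SITE/bin/gerrit.sh start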
We have a number of WildFly 18 servers running standalone but connected via message queues and Infinispan caches.
Recently we had an outage and started seeing this error on a few of the servers:
12:51:08,437 ERROR [org.infinispan.interceptors.impl.InvocationContextInterceptor] (timeout-thread--p5-t1) ISPN000136: Error executing command RemoveExpiredCommand on Cache 'studiomanager', writing keys [20003$1860529424$187762]: org.infinispan.util.concurrent.TimeoutException: ISPN000476: Timed out waiting for responses for request 7916397 from node4
at org.infinispan.remoting.transport.impl.SingleTargetRequest.onTimeout(SingleTargetRequest.java:65)
at org.infinispan.remoting.transport.AbstractRequest.call(AbstractRequest.java:87)
at org.infinispan.remoting.transport.AbstractRequest.call(AbstractRequest.java:22)
at java.util.concurrent.FutureTask.run(FutureTask.java:266) [rt.jar:1.8.0_181]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180) [rt.jar:1.8.0_181]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293) [rt.jar:1.8.0_181]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [rt.jar:1.8.0_181]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [rt.jar:1.8.0_181]
at java.lang.Thread.run(Thread.java:748) [rt.jar:1.8.0_181]
This seems fairly clear, except that the 'studiomanager' cache is defined as a local cache. It does make use of lifespan/eviction, which explains the first part of the error, but not the second. Why is a local cache seemingly timing out waiting for another server, as the second part of the error suggests?
We do have some replicated caches too, but this is not one of them.
From the standalone.xml file:
<cache-container name="InfinispanSystem" default-cache="systemSettings">
    <transport/>
    <local-cache name="studiomanager"/>
    <replicated-cache name="systemSettings">
        <locking isolation="REPEATABLE_READ"/>
    </replicated-cache>
    <!-- snip ... -->
</cache-container>
I have hunted for anything that vaguely looks like the same problem, but have come up largely blank. Can anyone shed any light on how or why this could happen?
EDIT:
As requested, the Infinispan version from the WildFly log is:
Infinispan 'Infinity Minus ONE +2' 9.4.16.Final
The servers are running on Docker, each using the same image, so the standalone.xml config is largely the same on all of them. We do have some CLI scripts that make changes to individual nodes, but the Infinispan subsystem isn't touched by them.
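One way to double-check what a running node actually has (rather than what the image's standalone.xml says) is to read the cache definition back through jboss-cli. A minimal sketch, assuming the default jboss-cli.sh location and the container/cache names shown above:
# print the effective definition of the 'studiomanager' cache on a running node
$JBOSS_HOME/bin/jboss-cli.sh --connect \
  --command="/subsystem=infinispan/cache-container=InfinispanSystem/local-cache=studiomanager:read-resource(recursive=true)"
If the output on the affected nodes differs from the snippet in the question, one of the per-node CLI scripts may have redefined the cache after all.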
I run a local Vault dev server (v0.10.1) and use AppRole as the auth method. I created a renewable MongoDB secret engine and assigned a policy to the AppRole that grants all capabilities on the paths secret/bootstrap, secret/application, database/creds/readwrite* and sys/leases/*.
Using spring-cloud-vault (v1.1.0), the application gets a MongoDB username/password correctly after launch. But when the lease approaches its TTL and spring-cloud-vault tries to renew it, I get the following exception:
2018-05-03 20:16:12.369 WARN 2921 --- [g-Cloud-Vault-1] LeaseEventPublisher$LoggingErrorListener : [RequestedSecret [path='database/creds/readwrite', mode=RENEW]] Lease [leaseId='database/creds/readwrite/200fad65-2165-9da4-206f-bb65c93cfdaa', leaseDuration=300, renewable=true] Status 403: permission denied
org.springframework.vault.VaultException: Status 403: permission denied
at org.springframework.vault.client.VaultResponses.buildException(VaultResponses.java:62) ~[spring-vault-core-1.1.1.RELEASE.jar:1.1.1.RELEASE]
at org.springframework.vault.core.VaultTemplate.doWithSession(VaultTemplate.java:321) ~[spring-vault-core-1.1.1.RELEASE.jar:1.1.1.RELEASE]
at org.springframework.vault.core.lease.SecretLeaseContainer.renew(SecretLeaseContainer.java:519) ~[spring-vault-core-1.1.1.RELEASE.jar:1.1.1.RELEASE]
at org.springframework.vault.core.lease.SecretLeaseContainer.doRenewLease(SecretLeaseContainer.java:487) ~[spring-vault-core-1.1.1.RELEASE.jar:1.1.1.RELEASE]
at org.springframework.vault.core.lease.SecretLeaseContainer$1.renewLease(SecretLeaseContainer.java:437) [spring-vault-core-1.1.1.RELEASE.jar:1.1.1.RELEASE]
at org.springframework.vault.core.lease.SecretLeaseContainer$LeaseRenewalScheduler$1.run(SecretLeaseContainer.java:678) [spring-vault-core-1.1.1.RELEASE.jar:1.1.1.RELEASE]
at org.springframework.scheduling.support.DelegatingErrorHandlingRunnable.run(DelegatingErrorHandlingRunnable.java:54) [spring-context-4.3.14.RELEASE.jar:4.3.14.RELEASE]
at org.springframework.scheduling.concurrent.ReschedulingRunnable.run(ReschedulingRunnable.java:81) [spring-context-4.3.14.RELEASE.jar:4.3.14.RELEASE]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) [na:1.8.0_152]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) [na:1.8.0_152]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180) [na:1.8.0_152]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293) [na:1.8.0_152]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [na:1.8.0_152]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [na:1.8.0_152]
at java.lang.Thread.run(Thread.java:748) [na:1.8.0_152]
What am I missing here?
UPDATE:
I changed the path from sys/leases/* to sys/* and now everything seems to work. So I would still like to know which paths under sys, besides sys/leases/*, are actually needed in this case.
As mentioned in the comments, the problem itself seems to be resolved in spring-vault-core 2.1.1.BUILD-SNAPSHOT, but a related lease-renewal problem still appears unresolved: expired leases are not rotated on secret renewal.
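If Spring Vault 1.x renews leases through Vault's legacy sys/renew endpoint rather than sys/leases/renew (which would explain why sys/leases/* was not enough while sys/* was), then granting the renewal endpoints explicitly should be sufficient. A minimal policy sketch under that assumption; the policy name lease-renew is made up here:
# hypothetical policy covering both the legacy and the current renewal endpoints
vault policy write lease-renew - <<'EOF'
# legacy renewal endpoint (used by older clients)
path "sys/renew" {
  capabilities = ["update"]
}
path "sys/renew/*" {
  capabilities = ["update"]
}
# current renewal endpoint
path "sys/leases/renew" {
  capabilities = ["update"]
}
EOF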
I've got two Artifactory instances, one of which serves as the primary Docker registry behind an Apache2 proxy. Now I'd like the second one to also act as a Docker registry, but with a remote repository that points to the primary instance.
When I try that, I get this message when testing the active replication:
Error testing pull replication config: Unknown host 'api: Name or service not known
Here is the full stacktrace in the logs:
2017-10-26 15:30:58,004 [art-exec-3] [ERROR] (o.a.a.c.BasicStatusHolder:212) - Error occurred while performing folder replication for 'private-docker-registry:': api
java.net.UnknownHostException: api
at java.net.InetAddress.getAllByName0(InetAddress.java:1280) ~[na:1.8.0_121]
at java.net.InetAddress.getAllByName(InetAddress.java:1192) ~[na:1.8.0_121]
at java.net.InetAddress.getAllByName(InetAddress.java:1126) ~[na:1.8.0_121]
at org.apache.http.impl.conn.SystemDefaultDnsResolver.resolve(SystemDefaultDnsResolver.java:45) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:111) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.conn.PoolingHttpClientConnectionManager.connect(PoolingHttpClientConnectionManager.java:353) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.execchain.MainClientExec.establishRoute(MainClientExec.java:380) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:236) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:184) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:88) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:110) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:184) ~[httpclient-4.5.1.jar:4.5.1]
at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:71) ~[httpclient-4.5.1.jar:4.5.1]
at org.jfrog.client.http.CloseableHttpClientDecorator.doExecute(CloseableHttpClientDecorator.java:90) ~[jfrog-http-client-1.2.4.jar:na]
at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82) ~[httpclient-4.5.1.jar:4.5.1]
at org.artifactory.repo.HttpRepo.doExecuteMethod(HttpRepo.java:493) ~[artifactory-core-5.2.0.jar:na]
at org.artifactory.repo.HttpRepo.executeMethod(HttpRepo.java:510) ~[artifactory-core-5.2.0.jar:na]
at org.artifactory.repo.HttpRepo.executeMethod(HttpRepo.java:461) ~[artifactory-core-5.2.0.jar:na]
at org.artifactory.addon.replication.core.context.RemoteReplicationRequestExecutor.execute(RemoteReplicationRequestExecutor.java:28) ~[artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.context.server.TargetServerInfoResolver.executeRequestAndSetDetails(TargetServerInfoResolver.java:92) ~[artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.context.server.TargetServerInfoResolver.resolveTargetInfo(TargetServerInfoResolver.java:49) ~[artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.BaseReplicationProducer.resolveTargetInfo(BaseReplicationProducer.java:92) ~[artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.BaseReplicationProducer.run(BaseReplicationProducer.java:78) ~[artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.remote.RemoteReplicator.replicate(RemoteReplicator.java:56) [artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.remote.RemoteReplicator.replicate(RemoteReplicator.java:29) [artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.addon.replication.core.ReplicationAddonImpl.performRemoteReplication(ReplicationAddonImpl.java:91) [artifactory-addon-replication-5.2.0.jar:na]
at org.artifactory.repo.replication.RemoteReplicationJob.onExecute(RemoteReplicationJob.java:101) [artifactory-core-5.2.0.jar:na]
at org.artifactory.schedule.quartz.QuartzCommand.execute(QuartzCommand.java:52) [artifactory-storage-common-5.2.0.jar:na]
at org.quartz.core.JobRunShell.run(JobRunShell.java:202) [quartz-2.2.1.jar:na]
at org.artifactory.schedule.ArtifactoryConcurrentExecutor$RunnableWrapper.run(ArtifactoryConcurrentExecutor.java:104) [artifactory-storage-common-5.2.0.jar:na]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [na:1.8.0_121]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [na:1.8.0_121]
at java.lang.Thread.run(Thread.java:745) [na:1.8.0_121]
What am I doing wrong?
The same thing occurs when I try to replicate my private account from Docker Hub.
Thank you for your help.
EDIT:
OK, I have now managed to get this partially working. I say "partially" because it no longer displays a stack trace and the image references are synced, but for some reason the layers themselves are not copied over to the remote. Looking more closely at the logs, it looks like a permissions problem:
2017-11-26 23:30:08,611 [replication-consumer-1511713803259-0] [WARN ] (o.a.r.s.RepositoryServiceImpl:901) - Cannot set properties on 'remote-docker-cache:my/irc-base/0.1.0-b25-09e84a42/sha256__096495a59c0e938508a5c9d4cb003d5e4556e0fb8f1befd9469903a6d446e797': Item not found.
2017-11-26 23:30:08,611 [replication-consumer-1511713803259-0] [ERROR] (o.a.a.c.BasicStatusHolder:214) - Unable to set properties for remote-docker-cache:my/image/0.1.0-b25-09e84a42/sha256__096495a59c0e938508a5c9d4cb003d5e4556e0fb8f1befd9469903a6d446e797
2017-11-26 23:30:10,947 [replication-consumer-1511713803259-0] [WARN ] (o.a.r.s.RepositoryServiceImpl:901) - Cannot set properties on 'remote-docker-cache:my/image/0.1.0-b25-09e84a42/sha256__5523a881c6c86f188888bba730867591402f40db1be718c64726b1723c5abbf5': Item not found.
2017-11-26 23:30:10,947 [replication-consumer-1511713803259-0] [ERROR] (o.a.a.c.BasicStatusHolder:214) - Unable to set properties for remote-docker-cache:my/image/0.1.0-b25-09e84a42/sha256__5523a881c6c86f188888bba730867591402f40db1be718c64726b1723c5abbf5
2017-11-26 23:30:12,145 [replication-consumer-1511713803259-0] [WARN ] (o.a.r.s.RepositoryServiceImpl:901) - Cannot set properties on 'remote-docker-cache:my/image/0.1.0-b25-09e84a42/sha256__6b888ef3098531f0c7000584ce049b24e4559cfab8c4141fcf62bcfd60f6b177': Item not found.
2017-11-26 23:30:12,145 [replication-consumer-1511713803259-0] [ERROR] (o.a.a.c.BasicStatusHolder:214) - Unable to set properties for remote-docker-cache:my/image/0.1.0-b25-09e84a42/sha256__6b888ef3098531f0c7000584ce049b24e4559cfab8c4141fcf62bcfd60f6b177
At the same time, on the registry hosting the layers:
20171126173008|1|REQUEST|192.168.210.102|admin|GET|/api/storage/docker/remote-docker/my/image/0.1.0-b25-09e84a42/sha256__096495a59c0e938508a5c9d4cb003d5e4556e0fb8f1befd9469903a6d446e797|HTTP/1.0|200|0
20171126173008|1|REQUEST|192.168.210.102|anonymous|HEAD|/api/docker/remote-docker/my/image/0.1.0-b25-09e84a42/sha256__5523a881c6c86f188888bba730867591402f40db1be718c64726b1723c5abbf5|HTTP/1.0|403|0
20171126173010|0|REQUEST|192.168.210.102|non_authenticated_user|GET|/api/storage/docker/remote-docker/my/image/0.1.0-b25-09e84a42/sha256__5523a881c6c86f188888bba730867591402f40db1be718c64726b1723c5abbf5|HTTP/1.0|401|0
20171126173010|0|REQUEST|192.168.210.102|admin|GET|/api/storage/docker/remote-docker/my/image/0.1.0-b25-09e84a42/sha256__5523a881c6c86f188888bba730867591402f40db1be718c64726b1723c5abbf5|HTTP/1.0|200|0
20171126173011|0|REQUEST|192.168.210.102|anonymous|HEAD|/api/docker/docker/remote-docker/my/image/0.1.0-b25-09e84a42/sha256__6b888ef3098531f0c7000584ce049b24e4559cfab8c4141fcf62bcfd60f6b177|HTTP/1.0|403|0
20171126173011|0|REQUEST|192.168.210.102|non_authenticated_user|GET|/api/storage/docker/remote-docker/my/image/0.1.0-b25-09e84a42/sha256__6b888ef3098531f0c7000584ce049b24e4559cfab8c4141fcf62bcfd60f6b177|HTTP/1.0|401|0
I'm doing the replication with the admin user, which should have full access to the different registries.
Now the funny thing (maybe not so funny) is that if I give the anonymous user access to the registry, the replication works just fine. From a security point of view, however, I cannot simply allow anonymous access to these private registries.
Thanks again for your help
All credit goes to Yonatan from JFrog support
I finally managed to solve the issue thanks to JFrog support. Here is what I was doing wrong and how it should be solved:
In the remote repository settings, I had put the following as the target URL:
https://myregistry.example.com
JFrog support then kindly suggested prefixing the repository key with api/docker, giving the following target URL:
https://myregistry.example.com/api/docker/myregistry
More information can be found here.
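As a quick sanity check of the corrected target, you can hit the Docker V2 API root through Artifactory's api/docker prefix. A minimal sketch, assuming the repository key myregistry from above and admin credentials (curl will prompt for the password):
# should print 200 when the /api/docker/<repo-key> prefix is correct
curl -u admin -sS -o /dev/null -w "%{http_code}\n" \
  https://myregistry.example.com/api/docker/myregistry/v2/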
Edit:
Here is the exact reply from JFrog support (which is probably more accurate than my attempt to paraphrase what I understood):
"The issue you are experiencing is due to a misconfiguration in the target URL.
For some packaging formats, when using the corresponding client to access a repository through Artifactory, the repository key in the URL needs to be prefixed with api/ in the path. For example, in the case of Docker repositories, the repository key should be prefixed with api/docker.
Nevertheless, there are exceptions to this rule. For example, when replicating Maven repositories, you do not need to add a prefix to the remote repository path. (This is the reason why you did not encounter issues with replicating Maven repositories.)
You can find the full list here.
With regards to your scenario, please try to configure the following URL:
https://myregistry.example.com/api/docker/myregistry
Please note you have to add the target repository name."
Thank you Yonatan
I am trying to connect an S3 data store following these instructions. I am getting the exact error described in this SO question.
Steps:
Created a vanilla AEM 6.3 instance and was able to upload images to the DAM
Downloaded the S3 connector and copied all the .jar files into the crx-quickstart/install folder
Copied the org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.config file and set customBlobStore=B"true"
Copied the org.apache.jackrabbit.oak.plugins.blob.datastore.S3DataStore.config file, which looks like this:
accessKey="scribed"
connectionTimeout="120000"
maxConnections="40"
maxErrorRetry="10"
s3Bucket="myproj-s3bucket"
s3Region="ap-southeast-1"
s3EndPoint="https://scribed.signin.aws.amazon.com/console"
secretKey="scribed"
socketTimeout="120000"
writeThreads="30"
cacheSize="16GB"
cachePurgeTrigFactory="1"
(I have scrubbed the key and secret.)
When I restart AEM, none of the consoles start. It throws:
HTTP ERROR: 503 Problem accessing /. Reason: AuthenticationSupport service missing. Cannot authenticate request.
This is the exception trace:
15.05.2017 07:42:56.156 *INFO* [FelixStartLevel] org.apache.jackrabbit.oak.blob.cloud.s3.Utils Configuring Amazon Client from property file.
15.05.2017 07:42:59.401 *INFO* [FelixStartLevel] org.apache.jackrabbit.oak.blob.cloud.s3.Utils S3 service endpoint [https://170564245278.signin.aws.amazon.com/console]
15.05.2017 07:43:04.292 *ERROR* [FelixStartLevel] org.apache.jackrabbit.oak-blob-cloud [org.apache.jackrabbit.oak.plugins.blob.datastore.S3DataStore(2946)] The activate method has thrown an exception (java.lang.NullPointerException: null value in entry: component.id=null)
java.lang.NullPointerException: null value in entry: component.id=null
at com.google.common.collect.CollectPreconditions.checkEntryNotNull(CollectPreconditions.java:33)
at com.google.common.collect.ImmutableMap.entryOf(ImmutableMap.java:135)
at com.google.common.collect.ImmutableMap$Builder.put(ImmutableMap.java:206)
at com.google.common.collect.Maps.fromProperties(Maps.java:1187)
at org.apache.jackrabbit.oak.blob.cloud.s3.S3Backend.init(S3Backend.java:166)
at org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore.init(AbstractSharedCachingDataStore.java:163)
at org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreService.activate(AbstractDataStoreService.java:87)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.felix.scr.impl.inject.BaseMethod.invokeMethod(BaseMethod.java:224)
at org.apache.felix.scr.impl.inject.BaseMethod.access$500(BaseMethod.java:39)
at org.apache.felix.scr.impl.inject.BaseMethod$Resolved.invoke(BaseMethod.java:617)
at org.apache.felix.scr.impl.inject.BaseMethod.invoke(BaseMethod.java:501)
at org.apache.felix.scr.impl.inject.ActivateMethod.invoke(ActivateMethod.java:302)
at org.apache.felix.scr.impl.inject.ActivateMethod.invoke(ActivateMethod.java:294)
at org.apache.felix.scr.impl.manager.SingleComponentManager.createImplementationObject(SingleComponentManager.java:298)
at org.apache.felix.scr.impl.manager.SingleComponentManager.createComponent(SingleComponentManager.java:109)
at org.apache.felix.scr.impl.manager.SingleComponentManager.getService(SingleComponentManager.java:906)
at org.apache.felix.scr.impl.manager.SingleComponentManager.getServiceInternal(SingleComponentManager.java:879)
at org.apache.felix.scr.impl.manager.AbstractComponentManager.activateInternal(AbstractComponentManager.java:749)
at org.apache.felix.scr.impl.manager.AbstractComponentManager.enableInternal(AbstractComponentManager.java:675)
at org.apache.felix.scr.impl.manager.AbstractComponentManager.enable(AbstractComponentManager.java:430)
at org.apache.felix.scr.impl.manager.ConfigurableComponentHolder.enableComponents(ConfigurableComponentHolder.java:657)
at org.apache.felix.scr.impl.BundleComponentActivator.initialEnable(BundleComponentActivator.java:341)
at org.apache.felix.scr.impl.Activator.loadComponents(Activator.java:390)
at org.apache.felix.scr.impl.Activator.access$200(Activator.java:54)
at org.apache.felix.scr.impl.Activator$ScrExtension.start(Activator.java:265)
at org.apache.felix.utils.extender.AbstractExtender.createExtension(AbstractExtender.java:259)
at org.apache.felix.utils.extender.AbstractExtender.modifiedBundle(AbstractExtender.java:232)
at org.osgi.util.tracker.BundleTracker$Tracked.customizerModified(BundleTracker.java:482)
at org.osgi.util.tracker.BundleTracker$Tracked.customizerModified(BundleTracker.java:415)
at org.osgi.util.tracker.AbstractTracked.track(AbstractTracked.java:232)
at org.osgi.util.tracker.BundleTracker$Tracked.bundleChanged(BundleTracker.java:444)
at org.apache.felix.framework.util.EventDispatcher.invokeBundleListenerCallback(EventDispatcher.java:916)
at org.apache.felix.framework.util.EventDispatcher.fireEventImmediately(EventDispatcher.java:835)
at org.apache.felix.framework.util.EventDispatcher.fireBundleEvent(EventDispatcher.java:517)
at org.apache.felix.framework.Felix.fireBundleEvent(Felix.java:4542)
at org.apache.felix.framework.Felix.startBundle(Felix.java:2173)
at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1372)
at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:308)
at java.lang.Thread.run(Thread.java:745)
15.05.2017 07:43:04.308 *INFO* [FelixStartLevel] com.day.cq.cq-compat-codeupgrade BundleEvent RESOLVED
15.05.2017 07:43:04.310 *INFO* [FelixStartLevel] com.day.cq.cq-compat-codeupgrade BundleEvent STARTING
15.05.2017 07:43:04.310 *INFO* [FelixStartLevel] com.day.cq.cq-compat-codeupgrade BundleEvent STARTED
Am I missing any steps or configuration? Please help out.
I got the answer to my question with the help of my lead, by comparing the working config against the failing one. This parameter was incorrect:
s3EndPoint="https://scribed.signin.aws.amazon.com/console"
It can be left blank, as the connector will rebuild the endpoint using s3Region, or it can be set to https://region.aws.amazon.com; it should not be the AWS console sign-in URL. Since the error logs were throwing irrelevant errors, I was misled. Removing this one parameter made the difference.
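For comparison, a minimal sketch of how the S3DataStore.config might look with that fix applied; s3EndPoint is simply omitted so the connector derives the endpoint from s3Region, and the key/secret values are still placeholders:
accessKey="scrubbed"
secretKey="scrubbed"
s3Bucket="myproj-s3bucket"
s3Region="ap-southeast-1"
connectionTimeout="120000"
socketTimeout="120000"
maxConnections="40"
maxErrorRetry="10"
writeThreads="30"
cacheSize="16GB"
cachePurgeTrigFactory="1"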
A second observation: when starting AEM, it does initially throw the error, but it eventually starts up; you need to wait 3-4 minutes. In the logs I see connection refused during startup, but on subsequent requests, once all the config is loaded, it is able to connect and upload successfully.
I have downloaded Spark 2.1.0 and Apache Zeppelin 0.7.0. I have edited my environment variables for Spark, Java and winutils (as I am using Windows 10). I have edited zeppelin-env.sh and zeppelin-site.xml to export my JAVA_HOME and SPARK_HOME, and I edited zeppelin-site.xml to change the port number because 8080 was already in use. I opened a command line as an administrator, changed to the zeppelin-0.7.0-bin-all directory and ran the command bin\zeppelin.cmd. I opened the browser and navigated to localhost:8090. Zeppelin opens and the green light in the top right corner appears, suggesting I am connected to the server. But once I run the "load data into table" tutorial I receive an error:
java.net.ConnectException: Connection refused: connect
at java.net.DualStackPlainSocketImpl.connect0(Native Method)
at java.net.DualStackPlainSocketImpl.socketConnect(DualStackPlainSocketImpl.java:79)
at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
at java.net.PlainSocketImpl.connect(PlainSocketImpl.java:172)
at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
at java.net.Socket.connect(Socket.java:589)
at org.apache.thrift.transport.TSocket.open(TSocket.java:182)
at org.apache.zeppelin.interpreter.remote.ClientFactory.create(ClientFactory.java:51)
at org.apache.zeppelin.interpreter.remote.ClientFactory.create(ClientFactory.java:37)
at org.apache.commons.pool2.BasePooledObjectFactory.makeObject(BasePooledObjectFactory.java:60)
at org.apache.commons.pool2.impl.GenericObjectPool.create(GenericObjectPool.java:861)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:435)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:363)
at org.apache.zeppelin.interpreter.remote.RemoteInterpreterProcess.getClient(RemoteInterpreterProcess.java:90)
at org.apache.zeppelin.interpreter.remote.RemoteInterpreter.init(RemoteInterpreter.java:209)
at org.apache.zeppelin.interpreter.remote.RemoteInterpreter.getFormType(RemoteInterpreter.java:375)
at org.apache.zeppelin.interpreter.LazyOpenInterpreter.getFormType(LazyOpenInterpreter.java:105)
at org.apache.zeppelin.notebook.Paragraph.jobRun(Paragraph.java:365)
at org.apache.zeppelin.scheduler.Job.run(Job.java:175)
at org.apache.zeppelin.scheduler.RemoteScheduler$JobRunner.run(RemoteScheduler.java:329)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
No logs are available in the logs folder; everything is printed to the command line. The %md interpreter works, as the code from "Welcome to Zeppelin" runs fine.
I have tried altering the memory as per ZEPPELIN-305 and ZEPPELIN-449 on issues.apache.org. I have tried turning off the Windows firewall. I have uninstalled and reinstalled to make sure it wasn't an error I caused myself, but nothing seems to work.
Has anybody had the same/similar problem? Any help would be much appreciated.
Your issue might be that you are editing the wrong config file. Since you are on Windows 10, you want to edit zeppelin-env.cmd (and not zeppelin-env.sh, which is intended for Linux). See the link to the Zeppelin install guide.
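A minimal sketch of what conf\zeppelin-env.cmd might contain; the install paths below are examples only and need to be adjusted to your machine:
REM conf\zeppelin-env.cmd is picked up by bin\zeppelin.cmd on Windows
set JAVA_HOME=C:\Program Files\Java\jdk1.8.0_121
set SPARK_HOME=C:\spark-2.1.0-bin-hadoop2.7
REM HADOOP_HOME should point at the directory containing bin\winutils.exe (example path)
set HADOOP_HOME=C:\hadoop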