I have an API project whose NuGet packages I want Dependabot to update from my own feed, but when the script reaches the UpdateCheckers step it reports update_not_possible.
The project is hosted on Azure DevOps, just like the NuGet feed.
When the build runs, the script only prints "Found #{dep.name} #{dep.version}..." for each dependency and never creates a pull request.
require "dependabot/file_fetchers"
require "dependabot/file_parsers"
require "dependabot/update_checkers"
require "dependabot/file_updaters"
require "dependabot/pull_request_creator"
require "dependabot/pull_request_updater"
require "dependabot/omnibus"
package_manager = "nuget"
repo_name = "myorganization/myproject/_git/WebApi.Dummy" # format: organization/project/_git/repository
directory = "src/WebApi.Dummy"
branch = "Dependabot"
azure_hostname = "dev.azure.com" # bare hostname only; the org and project belong in repo_name
credentials = [{
  "type" => "git_source",
  "host" => azure_hostname,
  "username" => "x-access-token",
  "password" => "my_personal_token"
}, {
  "type" => "nuget_feed",
  "url" => "https://dev.azure.com/myorganization/myproject/_packaging/myfeedNuget/nuget/v3/index.json",
  "token" => ":my_personal_token" # leading colon: empty username, PAT as the password
}]
source = Dependabot::Source.new(
  provider: "azure",
  hostname: azure_hostname,
  api_endpoint: "https://#{azure_hostname}/",
  repo: repo_name,
  directory: directory,
  branch: branch
)
puts "Fetching #{package_manager} dependency files for #{repo_name}"
fetcher = Dependabot::FileFetchers.for_package_manager(package_manager).new(
  source: source,
  credentials: credentials,
)
files = fetcher.files
commit = fetcher.commit
parser = Dependabot::FileParsers.for_package_manager(package_manager).new(
  dependency_files: files,
  source: source,
  credentials: credentials,
)
dependencies = parser.parse
dependencies.select(&:top_level?).each do |dep|
  puts "Found #{dep.name} #{dep.version}..."
  checker = Dependabot::UpdateCheckers.for_package_manager(package_manager).new(
    dependency: dep,
    dependency_files: files,
    credentials: credentials,
  )
  if checker.up_to_date?
    puts "  already using latest version"
    next
  end
  requirements_to_unlock =
    if !checker.requirements_unlocked_or_can_be?
      if checker.can_update?(requirements_to_unlock: :none) then :none
      else :update_not_possible
      end
    elsif checker.can_update?(requirements_to_unlock: :own) then :own
    elsif checker.can_update?(requirements_to_unlock: :all) then :all
    else :update_not_possible
    end
  next if requirements_to_unlock == :update_not_possible
  updated_deps = checker.updated_dependencies(
    requirements_to_unlock: requirements_to_unlock
  )
  puts "  considering upgrade to #{checker.latest_version}"
  updater = Dependabot::FileUpdaters.for_package_manager(package_manager).new(
    dependencies: updated_deps,
    dependency_files: files,
    credentials: credentials,
  )
  updated_files = updater.updated_dependency_files
  pr_creator = Dependabot::PullRequestCreator.new(
    source: source,
    base_commit: commit,
    dependencies: updated_deps,
    files: updated_files,
    credentials: credentials,
    label_language: true,
    author_details: {
      email: "dependabot@bccr.fi.cr",
      name: "dependabot"
    },
  )
  pull_request = pr_creator.create
  if pull_request&.status == 201
    content = JSON[pull_request.body]
    puts "  PR ##{content["pullRequestId"]} submitted"
    ENV['PR_ID'] = "##{content["pullRequestId"]}"
  else
    puts "  PR already exists or an error has occurred"
  end
end
puts "Done"
I'm trying to connect to a RabbitMQ instance using the amqp package in Elixir, but at times the RabbitMQ instance won't be available when the Elixir server starts. I was wondering how I might implement a simple retry mechanism. There's one strategy here, but that seems more involved than necessary, especially since the README mentions that more information can be found in the official docs. Unfortunately, I couldn't find anything there.
Edit: as written, this will crash the application on start and exit.
My code for the module is as follows:
Server.Gen.Rabbit (child)
defmodule Server.Gen.Rabbit do
use GenServer
use AMQP
  defmodule State do
    @type t :: %{
            id: String.t(),
            chan: map()
          }
    defstruct id: "", chan: nil
  end
  def start_link(%{id: id}) do
    GenServer.start_link(
      __MODULE__,
      %State{id: id},
      name: :"#{id}:rabbit"
    )
  end
  def init(opts) do
    host = "amqp://guest:guest@localhost"

    case Connection.open(host) do
      {:ok, conn} ->
        {:ok, chan} = Channel.open(conn)
        setup_queue(opts.id, chan)
        :ok = Basic.qos(chan, prefetch_count: 1)
        queue_to_consume = @online_receive_queue <> opts.id
        IO.puts("queue_to_consume_online: " <> queue_to_consume)
        {:ok, _consumer_tag} = Basic.consume(chan, queue_to_consume, nil, no_ack: true)
        {:ok, %State{chan: chan, id: opts.id}}

      {:error, _} ->
        IO.puts("[Rabbit] error on connecting to server: #{host}")
        # {:backoff, _} is a return value from the Connection library, not
        # GenServer; GenServer.init/1 does not accept it, hence the crash.
        {:backoff, 5_000}
    end
  end
end
Server (parent)
defmodule Server do
  use Application

  def start(_type, _args) do
    import Supervisor.Spec, warn: false

    children = [
      {
        GenRegistry,
        worker_module: Server.Gen.Rabbit
      },
      Plug.Cowboy.child_spec(
        scheme: :http,
        plug: Server.Router,
        options: [
          port: String.to_integer(System.get_env("PORT") || "3000"),
          dispatch: dispatch(),
          protocol_options: [idle_timeout: :infinity]
        ]
      )
    ]

    opts = [strategy: :one_for_one, name: Server.Supervisor]
    Supervisor.start_link(children, opts)
  end
end
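One lightweight pattern I'm considering (a sketch of my own, not from the amqp docs, and assuming OTP 21+ for handle_continue) is to move the connection attempt out of init/1 into handle_continue/2 and schedule retries with Process.send_after/3, so a missing broker never crashes the supervision tree on boot:

  def init(opts) do
    # Connect asynchronously so a missing broker can't crash startup.
    {:ok, %State{id: opts.id}, {:continue, :connect}}
  end

  def handle_continue(:connect, state) do
    host = "amqp://guest:guest@localhost"

    case Connection.open(host) do
      {:ok, conn} ->
        {:ok, chan} = Channel.open(conn)
        # setup_queue/2, Basic.qos/2 and Basic.consume/4 as in init above
        {:noreply, %State{state | chan: chan}}

      {:error, _} ->
        # Broker unavailable: try again in five seconds instead of crashing.
        Process.send_after(self(), :connect, 5_000)
        {:noreply, state}
    end
  end

  def handle_info(:connect, state), do: {:noreply, state, {:continue, :connect}}

Since init/1 returns immediately, the supervisor finishes starting the rest of the tree, and the process keeps retrying every five seconds until the broker is reachable.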
Aloha,
TL;DR:
I am trying to create an S3 bucket locally using Terraform instead of awscli or awslocal, and I am running into some errors. I am wondering whether this approach is even supported by LocalStack. I am not sure what I did wrong here, but I guess I need to use the awscli to create S3 buckets. Does anyone have an idea why the bucket name is not forwarded?
Long Version:
I am using a docker-compose.yaml to define the LocalStack Docker container:
version: '3'
services:
  localstack:
    image: localstack/localstack:0.10.5
    ports:
      - "4572:4572"
      - "4584:4584"
      - "${PORT_WEB_UI-8080}:${PORT_WEB_UI-8080}"
    environment:
      - DEFAULT_REGION=eu-central-1
      - SERVICES=s3,secretsmanager
      - DEBUG=${DEBUG- }
      - DATA_DIR=${DATA_DIR- }
      - PORT_WEB_UI=${PORT_WEB_UI- }
      - DOCKER_HOST=${LOCALSTACK_DOCKER_HOST-unix:///var/run/docker.sock}
      - TF_VAR_localstack_host=localhost
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
I use this Terraform main.tf to define what I want to create in the Docker container:
variable "localstack_host" {
default = "localhost"
}
provider "aws" {
version = "~> 2.39.0"
alias = "local"
region = "eu-central-1"
access_key = "This is not an actual access key."
secret_key = "This is not an actual secret key."
skip_credentials_validation = true
skip_metadata_api_check = true
skip_requesting_account_id = true
endpoints {
secretsmanager = "http://${var.localstack_host}:4584"
s3 = "http://${var.localstack_host}:4572"
}
}
resource "aws_s3_bucket" "s3_encryption_test_bucket" {
bucket = "s3-encryption-test-bucket"
provider = "aws.local"
}
After starting the Docker container, I apply the Terraform configuration to the locally running LocalStack instance:
terraform plan
terraform apply
The error I get from Terraform is:
aws_s3_bucket.s3_encryption_test_bucket: Creating...
acceleration_status: "" => "<computed>"
acl: "" => "private"
arn: "" => "<computed>"
bucket: "" => "s3-encryption-test-bucket"
bucket_domain_name: "" => "<computed>"
bucket_regional_domain_name: "" => "<computed>"
force_destroy: "" => "false"
hosted_zone_id: "" => "<computed>"
region: "" => "<computed>"
request_payer: "" => "<computed>"
versioning.#: "" => "<computed>"
website_domain: "" => "<computed>"
website_endpoint: "" => "<computed>"
aws_s3_bucket.s3_encryption_test_bucket: Still creating... (10s elapsed)
aws_s3_bucket.s3_encryption_test_bucket: Still creating... (20s elapsed)
.....
aws_s3_bucket.s3_encryption_test_bucket: Still creating... (2m10s elapsed)
aws_s3_bucket.s3_encryption_test_bucket: Still creating... (2m20s elapsed)
Error: Error applying plan:
1 error(s) occurred:
* aws_s3_bucket.s3_encryption_test_bucket: 1 error(s) occurred:
* aws_s3_bucket.s3_encryption_test_bucket: error getting S3 Bucket CORS configuration: timeout while waiting for state to become 'success' (timeout: 2m0s)
I also looked into the logs of the container and found this error message:
2019-12-12T13:24:45:ERROR:localstack.services.generic_proxy: Error forwarding request: Parameter validation failed:
Invalid bucket name "": Bucket name must match the regex "^[a-zA-Z0-9.\-_]{1,255}$" Traceback (most recent call last):
File "/opt/code/localstack/localstack/services/generic_proxy.py", line 240, in forward
path=path, data=data, headers=forward_headers).......
I had the same problem; the solution for me was adding s3_force_path_style = true to the provider "aws" section:
provider "aws" {
...
s3_force_path_style = true
...
}
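That flag makes the provider send path-style requests (bucket name in the URL path) rather than virtual-hosted-style requests (bucket name in the hostname), which is what LocalStack's proxy expects; with virtual-hosted addressing the path carries no bucket name, which matches the Invalid bucket name "" error in the logs. Applied to the provider block from the question, it would look roughly like this (a sketch, keeping the question's placeholders):

provider "aws" {
  version                     = "~> 2.39.0"
  alias                       = "local"
  region                      = "eu-central-1"
  access_key                  = "This is not an actual access key."
  secret_key                  = "This is not an actual secret key."
  s3_force_path_style         = true # send the bucket name in the path, not the hostname
  skip_credentials_validation = true
  skip_metadata_api_check     = true
  skip_requesting_account_id  = true

  endpoints {
    secretsmanager = "http://${var.localstack_host}:4584"
    s3             = "http://${var.localstack_host}:4572"
  }
}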
I encountered the same issue. Simply defining the ACL in the resource block solved it for me:
resource "aws_s3_bucket" "s3_encryption_test_bucket" {
bucket = "s3-encryption-test-bucket"
provider = "aws.local"
acl = "private"
}
So I'm writing a script to batch delete users from a Google Apps for Education domain. The code looks like this:
#! /usr/bin/env ruby
require 'google/api_client'
require 'csv'
service_account_email = 'XXXXXXX@developer.gserviceaccount.com'
key_file = 'key.p12'
key_secret = 'notasecret'
admin_email = 'XXX@xxx'
# Build the API Client object
client = Google::APIClient.new(
  :application_name => 'XXX',
  :application_version => '0.1'
)
key = Google::APIClient::KeyUtils.load_from_pkcs12(key_file, key_secret)
client.authorization = Signet::OAuth2::Client.new(
  :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',
  :audience => 'https://accounts.google.com/o/oauth2/token',
  :scope => 'https://www.googleapis.com/auth/admin.directory.user',
  :issuer => service_account_email,
  :signing_key => key,
  :person => admin_email,
)
client.authorization.fetch_access_token!
directory = client.discovered_api('admin', 'directory_v1')
# Reads and parses CSV input into a hash
# Takes file path as an argument
def import_csv(file)
  csv = CSV.new(
    File.open(file).read,
    :headers => true,
    :header_converters => :symbol
  )
  return csv.to_a.map { |row| row.to_hash }
end
users_to_delete = import_csv('accounts.csv')
puts 'Preparing to delete users...'
users_to_delete.each_slice(1000) do |chunk|
  directory.batch do |directory|
    chunk.each do |user|
      client.execute!(
        :api_method => directory.users.delete,
        :parameters => { :userKey => user[:emailaddress].downcase }
      )
    end
  end
end
puts 'Users successfully deleted!'
When I run the script without the two outer batch blocks, the script runs perfectly (although incredibly slowly).
What I want to know is what I need to change to stop getting the undefined method error for 'batch' on the directory API. In the examples in Google's documentation, I've noticed that they instantiate the API differently (zoo = Google::Apis::ZooV1::ZooService.new instead of zoo = client.discovered_api('zoo', 'v1')). I don't see how that would make a difference, though.
You can achieve it this way:
client = Google::APIClient.new(
  :application_name => 'XXX',
  :application_version => '0.1'
)
directory = client.discovered_api('admin', 'directory_v1')

batch = Google::APIClient::BatchRequest.new do |result|
  puts result.data
end

batch.add(
  :api_method => directory.users.delete,
  :parameters => { :userKey => user[:emailaddress].downcase }
)

client.execute(batch)
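Tying that back to the original script, the per-chunk loop might look like this (a sketch using the same legacy google-api-client BatchRequest API as above; the 1000-per-slice figure is carried over from the question, not a verified API limit):

users_to_delete.each_slice(1000) do |chunk|
  # One batch per chunk; the block is invoked once per batched call.
  batch = Google::APIClient::BatchRequest.new do |result|
    puts result.data
  end

  chunk.each do |user|
    batch.add(
      :api_method => directory.users.delete,
      :parameters => { :userKey => user[:emailaddress].downcase }
    )
  end

  client.execute!(batch)
end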
I'm experiencing what would seem to be a very basic problem: header files not being found. Below is the basic directory structure for a test pod I've created. My requirement is to retain the directory structure and for both the root and the SubPods to have access to the header files. To retain the directory structure I'm using SubPods. If none of the classes reference other classes, the pod validates and the structure is correct. But if TSTSubClass1.h includes TSTRootClass.h, the pod lint validation fails with 'fatal error: 'TSTRootClass.h' file not found'.
Classes/TSTRootClass.h
Classes/TSTRootClass.m
Classes/SubPod-1/TSTSubClass1.h
Classes/SubPod-1/TSTSubClass1.m
Classes/SubPod-2/TSTSubClass2.h
Classes/SubPod-2/TSTSubClass2.m
I've tried various combinations of public_header_files, preserve_paths and s.xcconfig = { 'HEADER_SEARCH_PATHS' => '${PODS_ROOT}/**' }, but no luck. Is there anywhere that explains how to resolve header search issues, and is there any way of outputting the search path details?
The podspec file is detailed below; the source has been pushed to the repo, and the structure and files do exist.
Pod::Spec.new do |s|
  s.name        = "PODTest"
  s.version     = "0.1.0"
  s.summary     = "PODTest dg dghjghj fghj ."
  s.description = <<-DESC
                  sdfg sdfg adfg sdfgn of PODTest
                  DESC
  s.homepage    = "http://myhomepage"
  s.license     = 'MIT'
  s.author      = { "Duncan Hill" => "myemail@somedomain.com" }
  #s.source     = { :git => "git@bitbucket.org:reponame/podtest.git" }
  s.source      = { :git => "git@bitbucket.org:reponame/podtest.git", :tag => s.version.to_s }

  s.platform = :ios, '5.0'
  s.ios.deployment_target = '5.0'
  s.requires_arc = true

  #s.xcconfig = { 'HEADER_SEARCH_PATHS' => '${PODS_ROOT}/#{s.name}/**' }
  s.xcconfig = { 'HEADER_SEARCH_PATHS' => '${PODS_ROOT}/**' }
  s.source_files = "Classes/*.{h,m}"
  s.preserve_paths = "Classes/*"

  s.subspec "SubPod-1" do |ss|
    ss.public_header_files = "Classes/*.h"
    ss.source_files = "Classes/SubPod-1/*.{h,m}"
  end

  s.subspec "SubPod-2" do |ss|
    ss.public_header_files = "Classes/*.h"
    ss.source_files = "Classes/SubPod-2/*.{h,m}"
  end
end
Thanks for any help.
Your subspecs cannot depend on the parent spec, but they can depend on other subspecs. You probably want to create some sort of "Core" subspec, then have SubPod-1 and SubPod-2 depend on that, along these lines:
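A sketch of that layout (file patterns and names taken from the question's podspec; only the subspec section changes):

Pod::Spec.new do |s|
  # ... same metadata as in the question ...

  # Root classes move into their own subspec so others can depend on them.
  s.subspec "Core" do |ss|
    ss.source_files = "Classes/*.{h,m}"
    ss.public_header_files = "Classes/*.h"
  end

  s.subspec "SubPod-1" do |ss|
    ss.dependency "PODTest/Core"
    ss.source_files = "Classes/SubPod-1/*.{h,m}"
    ss.public_header_files = "Classes/SubPod-1/*.h"
  end

  s.subspec "SubPod-2" do |ss|
    ss.dependency "PODTest/Core"
    ss.source_files = "Classes/SubPod-2/*.{h,m}"
    ss.public_header_files = "Classes/SubPod-2/*.h"
  end
end

With TSTRootClass.h public in PODTest/Core, the subpods' includes resolve without any HEADER_SEARCH_PATHS overrides.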
Just integrated the Gibbon gem, and I am getting a "hostname does not match the server certificate" error thrown back when trying to subscribe a new user.
def subscribe_to_mailchimp(testing = false)
  return true if Rails.env.test? && !testing

  list_id = ENV['My-list-id']
  response = Rails.configuration.mailchimp.lists.subscribe({
    id: list_id,
    email: { email: email },
    double_optin: false,
  })
  response
end
A slight change in the way this works since the last time I looked seems to have fixed the issue.
Firstly
def subscribe_to_mailchimp(testing = false)
  return true if Rails.env.test? && !testing

  list_id = 'My-list-id'
  response = Rails.configuration.mailchimp.lists.subscribe({
    id: list_id,
    email: { email: email },
    double_optin: false,
  })
  response
end
Secondly
in config/initializers/mailchimp.rb
if Rails.env.test?
  Gibbon::Export.api_key = "fake"
  Gibbon::Export.throws_exceptions = false
end

Gibbon::API.api_key = "YOUR_API_KEY"
Gibbon::API.timeout = 15
Gibbon::API.throws_exceptions = false

Rails.configuration.mailchimp = Gibbon::API.new
Finally in application.rb
require 'openssl'
The missing require 'openssl' was what was generating the "hostname does not match the server certificate" error, since the calls Gibbon makes go over HTTPS.
And if you're interested, in users.rb:
def subscribe_to_mailchimp(testing = false)
  return true if Rails.env.test? && !testing

  list_id = "YOUR_LIST_ID"
  response = Rails.configuration.mailchimp.lists.subscribe({
    id: list_id,
    email: { email: email },
    double_optin: false,
  })
  response
end
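For completeness, a hypothetical way to wire this up (assuming a User model with an email attribute; the callback placement is my assumption, not from the original post):

# app/models/user.rb (hypothetical wiring)
class User < ActiveRecord::Base
  # Subscribe each newly created user via the method defined above.
  after_create :subscribe_to_mailchimp
end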