OpenIO Swift deny_host_headers - reverse proxy

OpenIO 7.2.0.
I have an OpenIO cluster with Keystone (Queens) authentication.
By default, any user can configure their own ACLs and public URLs.
I would like to restrict users to only reading and writing containers and objects.
Apparently deny_host_headers in proxy-server.conf can do the job, but it does not seem to work: nothing happens.
I didn't find any "super admin" ACLs.
Any ideas?
My proxy-server.conf:
# OpenIO managed
[DEFAULT]
use_stderr = False
bind_ip = ip
bind_port = port
workers = 72
max_clients = 1024
user = openio
log_facility = /dev/log
log_header = true
log_level = INFO
log_name = OIO,OPENIO,oioswift,0
eventlet_debug = false
sds_namespace = OPENIO
sds_proxy_url = http://ip:port
sds_default_account = openio
sds_connection_timeout = 5
sds_read_timeout = 35
sds_write_timeout = 35
sds_pool_connections = 500
sds_pool_maxsize = 500
sds_max_retries = 0
sds_tls = False
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk proxy-logging authtoken keystoneauth proxy-logging copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:healthcheck]
use = egg:oioswift#healthcheck
[filter:proxy-logging]
use = egg:swift#proxy_logging
access_log_headers = false
access_log_headers_only =
[filter:cache]
use = egg:swift#memcache
memcache_servers = ip:port
memcache_max_connections = 10
oio_cache = False
oio_cache_ttl = 0
[filter:bulk]
use = egg:swift#bulk
#[filter:tempurl]
#use = egg:swift#tempurl
#[filter:swift3]
#use = egg:swift3#swift3
#force_swift_request_proxy_log = True
#s3_acl = True
#check_bucket_owner = True
#location = us-east-1
#max_bucket_listing = 1000
#max_multi_delete_objects = 1000
#max_upload_part_num = 10000
#log_s3api_command = False
#bucket_db_enabled = True
#bucket_db_prefix = s3bucket:
#storage_domain = s3.openio.io
#bucket_db_master_name = OPENIO-master-1
#bucket_db_sentinel_hosts = ip:port
#[filter:tempauth]
#use = egg:oioswift#tempauth
#user_demo_demo = DEMO_PASS .admin
[filter:copy]
use = egg:oioswift#copy
object_post_as_copy = False
[filter:container-quotas]
use = egg:swift#container_quotas
[filter:account-quotas]
use = egg:swift#account_quotas
[filter:slo]
use = egg:oioswift#slo
max_manifest_segments = 10000
concurrency = 10
[filter:dlo]
use = egg:swift#dlo
[filter:versioned_writes]
use = egg:oioswift#versioned_writes
allow_versioned_writes = True
[app:proxy-server]
use = egg:oioswift#main
object_post_as_copy = False
allow_account_management = True
account_autocreate = True
sds_chunk_checksum_algo =
deny_host_headers = x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
[filter:authtoken]
auth_type = password
#username = swift
username = user
project_name = user
region_name = region
user_domain_id = domain
memcache_secret_key = memcache_secret_key
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
insecure = True
cache = swift.cache
delay_auth_decision = True
token_cache_time = 300
auth_url = http://ip:port
include_service_catalog = False
www_authenticate_uri = http://ip:port
memcached_servers = ip:port
password = password
revocation_cache_time = 60
memcache_security_strategy = ENCRYPT
project_domain_id = domain
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = role
reseller_admin_role = role

Setting delay_auth_decision = False in the [filter:authtoken] section of proxy-server.conf does the job.
Per the option's description: delay_auth_decision defaults to False, but leaving it as False will prevent other auth systems, staticweb, tempurl, formpost, and ACLs from working; the value must be explicitly set to True for those to work.
Now only object owners can view/create/edit containers and objects, so ACLs and sharing won't work.
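In other words, the only change relative to the configuration above is in the [filter:authtoken] section (a minimal sketch; all other settings stay as shown):

[filter:authtoken]
# ... all other authtoken settings as above ...
delay_auth_decision = False

The trade-off is exactly the one the documentation warns about: with the value set to False, delegated auth decisions, and with them container ACLs and sharing, stop working.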

Related

Configure Superset so each user accesses only their own datasets and databases

I set up Superset to use LDAP for authentication, but I have one problem.
All users have the Gamma role by default, and everyone can view other users' datasources and databases.
How can it be set up so that each user sees only the datasources they created?
Modify the LDAP configuration:
from flask_appbuilder.security.manager import AUTH_LDAP  # required for AUTH_TYPE below

AUTH_TYPE = AUTH_LDAP
AUTH_ROLE_ADMIN = 'Admin'
AUTH_LDAP_USE_TLS = False
AUTH_USER_REGISTRATION_ROLE = "Admin"
AUTH_LDAP_FIRSTNAME_FIELD = "givenName"
AUTH_LDAP_LASTNAME_FIELD = "sn"
AUTH_LDAP_EMAIL_FIELD = "mail"
AUTH_USER_REGISTRATION = True
AUTH_LDAP_SERVER = "ldap://xxx.xxx.xxx.xxx:389"
AUTH_LDAP_SEARCH = "cn=hall.net,ou=groups,dc=dataops,dc=dg"
AUTH_LDAP_USERNAME_FORMAT = "uid=%s,ou=groups1,dc=dataops,dc=dg"
AUTH_LDAP_UID_FIELD = "uid"
AUTH_ROLES_MAPPING = {
    "cn=hall.net,ou=groups,dc=dataops,dc=dg": ["Admin"],
    "cn=hall.biz,ou=groups,dc=dataops,dc=dg": ["Admin"],
}
#AUTH_LDAP_GROUP_FIELD = "memberOf"
AUTH_ROLES_SYNC_AT_LOGIN = False
PERMANENT_SESSION_LIFETIME = 1800
AUTH_LDAP_BIND_USER = "cn=supersetadmin_MX4G,cn=hall.net,ou=groups,dc=dataops,dc=dg"
AUTH_LDAP_BIND_PASSWORD = "6P8HIKBZCZ"
AUTH_ROLE_PUBLIC = 'Public'
PUBLIC_ROLE_LIKE = 'Alpha'
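One caveat: AUTH_USER_REGISTRATION_ROLE = "Admin" gives every self-registered LDAP user the Admin role. If the goal is to keep ordinary users restricted, a more conservative sketch of the registration settings (assuming the stock Gamma role) would be:

# superset_config.py (sketch): self-registered users land in the restrictive
# Gamma role; only the LDAP groups mapped in AUTH_ROLES_MAPPING get Admin
AUTH_USER_REGISTRATION = True
AUTH_USER_REGISTRATION_ROLE = "Gamma"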

Redis | Terraform | Recreated from scratch on every terraform apply

I am creating Redis in AWS using Terraform. When I run terraform apply for the first time, it creates the cluster without issues. But if I re-run terraform apply, the code below destroys the Redis cluster and starts re-creating it, instead of recognizing that it already exists and moving on to newly added resources.
Is this expected behaviour?
Adding the terraform plan output to the question:
-/+ resource "aws_elasticache_replication_group" "redis" {
        apply_immediately              = true
        at_rest_encryption_enabled     = true
        auto_minor_version_upgrade     = false
        automatic_failover_enabled     = true
      + configuration_endpoint_address = (known after apply)
        engine                         = "redis"
        engine_version                 = "5.0.4"
      ~ id                             = "dev-af-redis" -> (known after apply)
        maintenance_window             = "sun:06:00-sun:07:00"
      ~ member_clusters                = [
          - "ca-cng-dev-af-redis-001",
          - "ca-cng-dev-af-redis-002",
        ] -> (known after apply)
        node_type                      = "cache.t2.medium"
      ~ number_cache_clusters          = 2 -> (known after apply)
        parameter_group_name           = "default.redis5.0"
        port                           = 6379
      ~ primary_endpoint_address       = "master.dev-af-redis.qxyj8a.euc1.cache.amazonaws.com" -> (known after apply)
        replication_group_description  = "Airflow Cluster"
        replication_group_id           = "dev-af-redis"
        security_group_ids             = [
            "sg-094175ad3062da04d",
        ]
      ~ security_group_names           = [] -> (known after apply)
      - snapshot_retention_limit       = 0 -> null
      ~ snapshot_window                = "02:30-03:30" -> (known after apply)
        subnet_group_name              = "dev-subnet-group-airflow"
        tags                           = {
            "Application"    = "project"
            "BusinessUnit"   = "subproject"
            "Classification" = "private"
            "Environment"    = "development"
            "Name"           = "dev-airflow-redis"
            "TechnicalOwner" = "ops"
            "Tier"           = "orchestration"
        }
        transit_encryption_enabled     = true

      + cluster_mode {
          + num_node_groups         = 1
          + replicas_per_node_group = 1 # forces replacement
        }
    }
Plan: 1 to add, 0 to change, 1 to destroy.
The Terraform code used to create Redis:
resource "aws_elasticache_replication_group" "cng_redis" {
replication_group_description = "Cluster"
replication_group_id = "dev-af-redis"
engine = "redis"
engine_version = "5.0.4"
node_type = "cache.t2.medium "
port = 6379
subnet_group_name = "dev-subnet-group-airflow"
security_group_ids = ["${aws_security_group.airflow_sg.id}"]
parameter_group_name = "default.redis5.0"
at_rest_encryption_enabled = true
transit_encryption_enabled = true
maintenance_window = "sun:06:00-sun:07:00"
auto_minor_version_upgrade = false
apply_immediately = true
automatic_failover_enabled = true
cluster_mode {
num_node_groups = "1"
replicas_per_node_group = "1"
}
tags = merge(
var.common_tags,
map("Classification", "private"),
map("Name", "airflow-redis")
)
}
Here is a solution (an "it's not a bug, it's a feature" case, I suppose ;) ): https://github.com/terraform-providers/terraform-provider-aws/issues/4817#issuecomment-463993424
I tested it and it works.
You have to use a parameter group with cluster-enabled set to yes.
I'm using Redis 5.0.5, so in my aws_elasticache_replication_group I set:
resource "aws_elasticache_replication_group" "elc-rep-group" {
...
automatic_failover_enabled = true #this is required, when cluster-enabled parameter is on
parameter_group_name = "default.redis5.0.cluster.on"
...
}
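Applied to the resource from the question, the fix would look like this (a sketch; every argument not shown stays as in the original resource):

resource "aws_elasticache_replication_group" "cng_redis" {
  # ... all other arguments as in the original resource ...

  # A cluster-enabled parameter group matches the cluster_mode block,
  # so a second `terraform apply` no longer plans a forced replacement.
  parameter_group_name       = "default.redis5.0.cluster.on"
  automatic_failover_enabled = true # required when cluster mode is enabled
}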

Access Control in Airpal: shiro_static_users.ini

I am not able to access Airpal with the username and password that I provided in the shiro_static_users.ini file. After making the changes, I compiled the code and started the Airpal server.
Without giving any username and password, I am able to access the Airpal server.
I have pm as a database in Hive and want to give the raj user permission on it.
My ini file:
authc = org.apache.shiro.web.filter.authc.PassThruAuthenticationFilter
shiro.loginUrl = /login
authc.successUrl = /app
allGroup = com.airbnb.shiro.UserGroup
allGroup.groups = all
allGroup.permissions = access, read:*:*, write:*:*
allGroup.defaultConnector = hive
allGroup.defaultSchema = default
allGroup.timeout = 40m
allGroup.accessLevel = User
allGroup1 = com.airbnb.shiro.UserGroup
allGroup1.groups = pm
allGroup1.permissions = access, read:*:*, write:*:*
allGroup1.defaultConnector = hive
allGroup1.defaultSchema = pm
allGroup1.timeout = 40m
allGroup1.accessLevel = User
cacheManager = org.apache.shiro.cache.MemoryConstrainedCacheManager
sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager
sessionListener = com.airbnb.shiro.SessionListenerLogger
allowAllFilter = com.airbnb.shiro.filter.AllowAllFilter
allowAllFilter.groups = $allGroup, $allGroup1
allowAllFilter.loginUrl = /login
securityManager.sessionManager = $sessionManager
[users]
test = test, all
raj = raj, pm
[roles]
all = *
pm = *
[urls]
/login = authc
/logout = logout
/app = authc, perms["access"]
/api/** = authc, perms["access"]

How do you configure UdpInput to work with a heka-flood udp test

I am trying to test sending data to heka's UdpInput with no success. I decided to try the heka-flood tool to mimic UDP traffic, also with no success. I am using version 0.10 of heka. My heka.toml:
[UdpInput]
address = "127.0.0.1:4880"
net = "udp"
splitter = "udp_splitter"
decoder = "ProtobufDecoder"
set_hostname = true
# I have also tried leaving this unset
[udp_splitter]
type = "HekaFramingSplitter"
[ProtobufDecoder]
[LogOutput]
type = "LogOutput"
message_matcher = "Logger == 'UdpInput'"
encoder = "PayloadEncoder"
and my flood.toml:
[udp_proto]
ip_address = "127.0.0.1:4880"
sender = "udp"
pprof_file = ""
encoder = "protobuf"
num_messages = 1000
corrupt_percentage = 0.0001
signed_percentage = 0.00011
variable_size_messages = false
ascii_only = true
max_message_size = 32000
If I add another input, say a log tailer, and add it to the message matcher for the LogOutput, those messages do get logged. I never see anything from the UdpInput. What am I doing wrong?

Doxygen - Objective-C - Document private class functions as private

In doxygen, I can create Objective-C categories inside my implementation file to hide interfaces that shouldn't be accessed publicly. However, doxygen still documents the category members as "public". Even adding \protected or \private doesn't change this.
Is there another method I'm overlooking that would put these in a "Private Methods" section of that category... or, better yet, in the same document as the public class, only listed as private?
Here's part of my config:
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = "My Project"
PROJECT_NUMBER =
OUTPUT_DIRECTORY = /Users/theuser/Development/myApplication/Documentation
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF =
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = YES
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 8
ALIASES =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
EXTENSION_MAPPING =
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
SUBGROUPING = YES
TYPEDEF_HIDES_STRUCT = YES
SYMBOL_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = YES
EXTRACT_PRIVATE = YES
EXTRACT_STATIC = YES
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = YES
HIDE_UNDOC_CLASSES = YES
HIDE_FRIEND_COMPOUNDS = YES
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = YES
CASE_SENSE_NAMES = NO
HIDE_SCOPE_NAMES = NO
SHOW_INCLUDE_FILES = YES
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = YES
SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_DIRECTORIES = NO
SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE =
Have you tried \internal in the code, coupled with INTERNAL_DOCS = NO in the Doxyfile?
Another way to put part of the code outside doxygen's scope is to use the \cond and \endcond commands.
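For example, a minimal sketch in an implementation file (the class, category, and label names here are hypothetical):

// MyClass.m
/// \cond PRIVATE_API
@interface MyClass (Private)
- (void)resetInternalState; // omitted from the generated docs
@end
/// \endcond

Everything between \cond and \endcond is left out of the documentation unless the label (PRIVATE_API here) is added to ENABLED_SECTIONS in the Doxyfile, which gives you a simple switch for producing an internal build of the docs.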