I have a function for the admin to burn tokens in a bucket.
pub fn burn_in_bucket(&mut self, mut bucket: Bucket) {
    assert!(bucket.resource_address() == self.token_vault.resource_address(), "input token invalid");
    let amount = bucket.amount();
    bucket.take(amount).burn();
}
I know how to make a method call with badge-protected access, and I have learned from the Scrypto example nft/magic-card:
https://github.com/radixdlt/scrypto-examples
let manifest = ManifestBuilder::new(&NetworkDefinition::simulator())
    .create_proof_from_account_by_amount(user.compo_addr, badge_amount, badge_addr)
    .withdraw_from_account_by_amount(user.compo_addr, amount, token_addr)
    .take_from_worktop_by_amount(amount, token_addr, |builder, bucket_id| {
        builder.call_method(component, func_name, args!(Bucket(bucket_id)))
    })
    .call_method(
        user.compo_addr,
        "deposit_batch",
        args!(Expression::entire_worktop()),
    )
    .build();
let receipt = test_runner.execute_manifest_ignoring_fee(
    manifest,
    vec![NonFungibleAddress::from_public_key(&user.public_key)],
);
receipt.expect_commit_success();
The instructions:
├─ CALL_METHOD ComponentAddress("NormalComponent[0257...]") "lock_fee" Decimal("100");
├─ CALL_METHOD ComponentAddress("AccountComponent[03d8...]") "create_proof_by_amount" Decimal("3") ResourceAddress("NormalResource[00f1...]");
├─ CALL_METHOD ComponentAddress("AccountComponent[03d8...]") "withdraw_by_amount" Decimal("12") ResourceAddress("NormalResource[004a...]");
├─ TAKE_FROM_WORKTOP_BY_AMOUNT Decimal("12") ResourceAddress("NormalResource[004a...]") Bucket("bucket1");
├─ CALL_METHOD ComponentAddress("NormalComponent[028f...]") "burn_in_bucket" Bucket("bucket1");
└─ CALL_METHOD ComponentAddress("AccountComponent[03d8...]") "deposit_batch" Expression("ENTIRE_WORKTOP");
Then I got this error:
COMMITTED FAILURE: KernelError(InvalidDropNodeVisibility { mode: Application, actor: Method(
Scrypto { package_address: NormalPackage[0165...], blueprint_name: "StableCoin", ident: "burn_in_bucket" },
ResolvedReceiver { derefed_from: Some((Global(Component(NormalComponent[028f...])), 36)), receiver: Component([143, 123, 111, 139, 198, 156, 205, 139, 18, 0, 234, 91, 33, 85, 171, 28, 89, 180, 217, 77, 183, 128, 169, 59, 111, 177, 96, 148, 55, 190, 141, 232, 15, 4, 0, 0]) }),
node_id: Bucket(1027) })
I suspect the "deposit_batch" is not correct, but don't know what is wrong...
The likely problem is that you didn't return the bucket after you burned from it. The code would look like this:
pub fn burn_in_bucket(&mut self, mut bucket: Bucket) -> Bucket {
    assert!(bucket.resource_address() == self.token_vault.resource_address(), "input token invalid");
    let amount = bucket.amount();
    bucket.take(amount).burn();
    return bucket
}
Note: I have rechecked the problem, and it's actually not a good design to burn tokens like that. A better design is to use a burn flag to protect the token and burn the whole bucket at once, like this:
pub fn burn_in_bucket(&mut self, mut bucket: Bucket) {
    assert!(bucket.resource_address() == self.token_vault.resource_address(), "input token invalid");
    self.token_badge.authorize(|| {
        bucket.burn()
    });
}
When I run the train.py script from https://github.com/tensorflow/models/tree/master/official/nlp, I get a 403 permission error.
python3 official/nlp/train.py --tpu=con-bert1 --experiment=bert/pretraining --mode=train --model_dir=gs://con_bioberturk/general/ --config_file=gs://con_bioberturk/bert_base.yaml --config_file=gs://con_bioberturk/pretrain.yaml --params_override="task.init_checkpoint=gs://con_bioberturk/bert-base-turkish-cased-tf/model.ckpt"
and my output is below:
I1115 07:49:02.847452 139877506112576 train_utils.py:368] Saving experiment configuration to gs://con_bioberturk/general/params.yaml
Traceback (most recent call last):
File "/usr/share/tpu/models/official/modeling/hyperparams/params_dict.py", line 349, in save_params_dict_to_yaml
yaml.dump(params.as_dict(), f, default_flow_style=False)
File "/usr/local/lib/python3.8/dist-packages/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "/usr/local/lib/python3.8/dist-packages/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "/usr/local/lib/python3.8/dist-packages/yaml/representer.py", line 28, in represent
self.serialize(node)
File "/usr/local/lib/python3.8/dist-packages/yaml/serializer.py", line 55, in serialize
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
File "/usr/local/lib/python3.8/dist-packages/yaml/emitter.py", line 115, in emit
self.state()
File "/usr/local/lib/python3.8/dist-packages/yaml/emitter.py", line 220, in expect_document_end
self.flush_stream()
File "/usr/local/lib/python3.8/dist-packages/yaml/emitter.py", line 790, in flush_stream
self.stream.flush()
File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/lib/io/file_io.py", line 219, in flush
self._writable_file.flush()
tensorflow.python.framework.errors_impl.PermissionDeniedError: Error executing an HTTP request: HTTP response code 403 with body '{
"error": {
"code": 403,
"message": "Access denied.",
"errors": [
{
"message": "Access denied.",
"domain": "global",
"reason": "forbidden"
}
]
}
}
when initiating an upload to gs://con_bioberturk/general/params.yaml
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "official/nlp/train.py", line 82, in <module>
app.run(main)
File "/usr/local/lib/python3.8/dist-packages/absl/app.py", line 308, in run
_run_main(main, args)
File "/usr/local/lib/python3.8/dist-packages/absl/app.py", line 254, in _run_main
sys.exit(main(argv))
File "official/nlp/train.py", line 47, in main
train_utils.serialize_config(params, model_dir)
File "/usr/share/tpu/models/official/core/train_utils.py", line 370, in serialize_config
hyperparams.save_params_dict_to_yaml(params, params_save_path)
File "/usr/share/tpu/models/official/modeling/hyperparams/params_dict.py", line 349, in save_params_dict_to_yaml
yaml.dump(params.as_dict(), f, default_flow_style=False)
File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/lib/io/file_io.py", line 197, in __exit__
self.close()
File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/lib/io/file_io.py", line 239, in close
self._writable_file.close()
tensorflow.python.framework.errors_impl.PermissionDeniedError: Error executing an HTTP request: HTTP response code 403 with body '{
"error": {
"code": 403,
"message": "Access denied.",
"errors": [
{
"message": "Access denied.",
"domain": "global",
"reason": "forbidden"
}
]
}
}
'
Here are my settings:
tpu-vm name: con-bert1
TPU software version: tpu-vm-tf-2.10.0-pod
cloud bucket (con_bioberturk) and tpu-vm are in the same location
It looks like you need to add the service account that is currently active on your TPU VM to the GCS bucket's IAM policy. Instructions here: https://github.com/google-research/text-to-text-transfer-transformer/issues/1003
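For example, a minimal sketch of granting that access with gsutil (the service account email here is a placeholder, and a narrower role may be enough if you only need to write objects):
gsutil iam ch serviceAccount:YOUR_TPU_SERVICE_ACCOUNT@example.iam.gserviceaccount.com:roles/storage.objectAdmin gs://con_bioberturk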
If that fails, try running gcloud auth login --update-adc on your TPU VM to add your credentials.
Hope this resolves your issue.
I am using Odoo V16. When I send a POST request to the web/session/authenticate endpoint with the correct user credentials in the body, like this:
{
    "params": {
        "db": <DB>,
        "login": <LOGIN>,
        "password": <PASSWORD>
    }
}
I get a bad response with the error message "'NoneType' object has no attribute 'user'".
The expected behavior is a JSON response with session info like:
{
    "jsonrpc": "2.0",
    "id": null,
    "result": {...}
}
I'm not sure if it is a bug in the new Odoo version or if I'm doing something wrong. Any help is appreciated. Thanks!
Full error response:
{
"jsonrpc": "2.0",
"id": null,
"error": {
"code": 200,
"message": "Odoo Server Error",
"data": {
"name": "builtins.AttributeError",
"debug": "Traceback (most recent call last):
File "/odoo-16/odoo/http.py", line 1963, in call
response = request._serve_nodb()
File "/odoo-16/odoo/http.py", line 1516, in _serve_nodb
response = self.dispatcher.dispatch(rule.endpoint, args)
File "/odoo-16/odoo/http.py", line 1775, in dispatch
result = endpoint(**self.request.params)
File "/odoo-16/odoo/http.py", line 673, in route_wrapper
result = endpoint(self, *args, **params_ok)
File "/odoo-16/addons/web/controllers/session.py", line 52, in authenticate
print('session_info', env['ir.http'].session_info())
File "/odoo-16/addons/web_tour/models/ir_http.py", line 12, in session_info
result = super().session_info()
File "/odoo-16/addons/web/models/ir_http.py", line 68, in session_info
user = request.env.user
AttributeError: 'NoneType' object has no attribute 'user'
",
"message": "'NoneType' object has no attribute 'user'",
"arguments": [
"'NoneType' object has no attribute 'user'"
],
"context": {}
}
}
}
I faced the same problem, and I found a workaround.
Problem
The problem happens when you call the Odoo API for authentication, web/session/authenticate.
We usually send the login data like this in a POST request to the server:
{"params":{"db":"odoo16","login":"admin","password":"***"}}
I got the error saying:
File "/usr/lib/python3/dist-packages/odoo/addons/mail/models/ir_http.py", line 17, in session_info
user = request.env.user
AttributeError: 'NoneType' object has no attribute 'user'
Solution
This problem happens when you have many databases on the server, so the request handler fails to fetch the user from the env variable.
But if you update your odoo.conf, add dbfilter = odoo16, and restart the server, you will have only one database available.
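A minimal sketch of the relevant odoo.conf entry (adjust the database name to yours):
[options]
; only expose the single database matching this filter
dbfilter = odoo16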
Then if you call the API you will get the correct response, and it works fine:
{
    "jsonrpc": "2.0",
    "id": null,
    "result": {
        "uid": 2,
        "is_system": true,
        "is_admin": true,
        "user_context": {
            "lang": "en_US",
            "tz": "Africa/Cairo",
            "uid": 2
        },
        ...
        ...
        ...
    }
}
Hope this helps you fix your issue until Odoo fixes this bug.
I have two questions concerning a SeaweedFS cluster we have running. The leader is started with the following command:
/usr/local/bin/weed server -ip=192.168.13.154 -ip.bind=192.168.13.154 -dir=/opt/seaweedfs/volume-1,/opt/seaweedfs/volume-2,/opt/seaweedfs/volume-3 -master.dir=/opt/seaweedfs/master -master.peers=192.168.13.154:9333,192.168.13.155:9333,192.168.13.156:9333 -volume.max=30,30,30 -filer=true -s3=true -metrics.address=192.168.13.84:9091
Question 1
I created a master.toml file using weed scaffold -config=master:
[master.maintenance]
# periodically run these scripts are the same as running them from 'weed shell'
scripts = """
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
"""
sleep_minutes = 17 # sleep minutes between each script execution
However, the maintenance scripts seem to fail with:
shell failed to keep connected to localhost:9333: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: Error while dialing dial tcp [::1]:19333: connect: connection refused"
This makes sense since the master is bound to IP 192.168.13.154 and the maintenance script tries to connect to localhost. How can I specify the master IP in the master.toml file?
Question 2
The number of volumes seems to grow faster than the amount of disk space used. For example, on the .154 server there are only 11 free volumes, but looking at the disk space there should be many more.
Status:
{
    "Topology": {
        "DataCenters": [
            {
                "Free": 16,
                "Id": "DefaultDataCenter",
                "Max": 270,
                "Racks": [
                    {
                        "DataNodes": [
                            {
                                "EcShards": 0,
                                "Free": 0,
                                "Max": 90,
                                "PublicUrl": "192.168.13.155:8080",
                                "Url": "192.168.13.155:8080",
                                "Volumes": 90
                            },
                            {
                                "EcShards": 0,
                                "Free": 11,
                                "Max": 90,
                                "PublicUrl": "192.168.13.154:8080",
                                "Url": "192.168.13.154:8080",
                                "Volumes": 79
                            },
                            {
                                "EcShards": 0,
                                "Free": 5,
                                "Max": 90,
                                "PublicUrl": "192.168.13.156:8080",
                                "Url": "192.168.13.156:8080",
                                "Volumes": 85
                            }
                        ],
                        "Free": 16,
                        "Id": "DefaultRack",
                        "Max": 270
                    }
                ]
            }
        ],
        "Free": 16,
        "Max": 270,
        "layouts": [
            ...
        ]
    },
    "Version": "30GB 1.44"
}
Disk (192.168.13.154):
/dev/sdb1 1007G 560G 397G 59% /opt/seaweedfs/volume-1
/dev/sdc1 1007G 542G 414G 57% /opt/seaweedfs/volume-2
/dev/sdd1 1007G 398G 559G 42% /opt/seaweedfs/volume-3
Is this related to the maintenance scripts not running properly, or is there something else I'm not understanding correctly?
Question 1: Added a fix https://github.com/chrislusf/seaweedfs/commit/56244fb9a13c75616aa8a9232c62d1b896906e98
Question 2: Likely related to master leadership changes.
It seems that jsonschema version 3.0.1 does not accept a multi-stage schema using $refs (while it works with jsonschema version 2.6.0).
I have to make it work under several module versions simply because my code will be running on different computers with different environments.
I verified my JSON files on https://www.jsonschemavalidator.net/ (thanks for this link, found in another Stack Overflow question).
I tried:
jsonschema -i myjson.json noRefs.schema.json --> 2.6.0 = OK, 3.0.1 = OK
jsonschema -i myjson.json usingRefs.schema.json --> 2.6.0 = OK, 3.0.1 = KO (fails)
Note: both *.schema.json files worked on https://www.jsonschemavalidator.net/
File myjson.json:
{
    "TopProperty": {
        "LowerProperty": {"toto": "plop"}
    }
}
File noRefs.schema.json:
{
    "type": "object",
    "properties": {
        "TopProperty": {"$ref": "#/schemaTopProperty"}
    },
    "schemaTopProperty": {
        "$id": "schemaTopProperty",
        "type": "object",
        "properties": {
            "LowerProperty": {
                "type": "object",
                "properties": {
                    "toto": {"type": "string"}
                }
            }
        }
    }
}
File usingRefs.schema.json:
{
    "type": "object",
    "properties": {
        "TopProperty": {"$ref": "#/schemaTopProperty"}
    },
    "schemaTopProperty": {
        "$id": "schemaTopProperty",
        "type": "object",
        "properties": {
            "LowerProperty": {
                "type": "object",
                "properties": {
                    "toto": {"$ref": "#/justAString"}
                }
            }
        }
    },
    "justAString": {
        "$id": "justAString",
        "type": "string"
    }
}
Error message received:
Traceback (most recent call last):
File "/usr/bin/jsonschema", line 11, in <module>
sys.exit(main())
File "/usr/lib/python2.7/site-packages/jsonschema/cli.py", line 67, in main
sys.exit(run(arguments=parse_args(args=args)))
File "/usr/lib/python2.7/site-packages/jsonschema/cli.py", line 78, in run
for error in validator.iter_errors(instance):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 323, in iter_errors
for error in errors:
File "/usr/lib/python2.7/site-packages/jsonschema/_validators.py", line 274, in properties
schema_path=property,
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 339, in descend
for error in self.iter_errors(instance, schema):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 323, in iter_errors
for error in errors:
File "/usr/lib/python2.7/site-packages/jsonschema/_validators.py", line 251, in ref
for error in validator.descend(instance, resolved):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 339, in descend
for error in self.iter_errors(instance, schema):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 323, in iter_errors
for error in errors:
File "/usr/lib/python2.7/site-packages/jsonschema/_validators.py", line 274, in properties
schema_path=property,
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 339, in descend
for error in self.iter_errors(instance, schema):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 323, in iter_errors
for error in errors:
File "/usr/lib/python2.7/site-packages/jsonschema/_validators.py", line 73, in items
for error in validator.descend(item, items, path=index):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 339, in descend
for error in self.iter_errors(instance, schema):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 323, in iter_errors
for error in errors:
File "/usr/lib/python2.7/site-packages/jsonschema/_validators.py", line 274, in properties
schema_path=property,
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 339, in descend
for error in self.iter_errors(instance, schema):
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 323, in iter_errors
for error in errors:
File "/usr/lib/python2.7/site-packages/jsonschema/_validators.py", line 247, in ref
scope, resolved = validator.resolver.resolve(ref)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 734, in resolve
return url, self._remote_cache(url)
File "/usr/lib/python2.7/site-packages/functools32/functools32.py", line 400, in wrapper
result = user_function(*args, **kwds)
File "/usr/lib/python2.7/site-packages/jsonschema/validators.py", line 744, in resolve_from_url
raise exceptions.RefResolutionError(exc)
jsonschema.exceptions.RefResolutionError: unknown url type: schemaTopProperty
Edit: my previous answer was incorrect.
TL;DR: You have two options:
1. Remove the $id properties from the definitions
2. Use #/ in the $id properties (Example: {"$id": "#/justAString"})
Details:
The issue is with the IDs. Up until draft-04, $ref and $id were treated at face value, nothing special, but starting with draft-06 these are URI references. In that case, when descending into {"$id": "schemaTopProperty"}, resolving {"$ref": "#/justAString"} no longer looks for a fragment justAString in the root structure, but for /justAString under the schemaTopProperty host, which is a remote reference.
Hence my two solutions: either remove the $ids, which cause the definitions to be treated as URLs (hosts, in fact), or define the $ids as what they are, fragments in the current schema.
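For example, applying option 2 to usingRefs.schema.json above gives (only the $id values change):
{
    "type": "object",
    "properties": {
        "TopProperty": {"$ref": "#/schemaTopProperty"}
    },
    "schemaTopProperty": {
        "$id": "#/schemaTopProperty",
        "type": "object",
        "properties": {
            "LowerProperty": {
                "type": "object",
                "properties": {
                    "toto": {"$ref": "#/justAString"}
                }
            }
        }
    },
    "justAString": {
        "$id": "#/justAString",
        "type": "string"
    }
}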
I've been looking through various answers on this topic but haven't been able to get a working solution.
I have Airflow set up to log to S3, but the UI seems to only use the file-based task handler instead of the S3 one specified.
I have the S3 connection set up as follows:
Conn_id = my_conn_S3
Conn_type = S3
Extra = {"region_name": "us-east-1"}
(the ECS instance uses a role that has full S3 permissions)
I have also created a log_config file with the following settings:
remote_log_conn_id = my_conn_S3
encrypt_s3_logs = False
logging_config_class = log_config.LOGGING_CONFIG
task_log_reader = s3.task
And in my log config I have the following setup:
LOG_LEVEL = conf.get('core', 'LOGGING_LEVEL').upper()
LOG_FORMAT = conf.get('core', 'log_format')
BASE_LOG_FOLDER = conf.get('core', 'BASE_LOG_FOLDER')
PROCESSOR_LOG_FOLDER = conf.get('scheduler', 'child_process_log_directory')
FILENAME_TEMPLATE = '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log'
PROCESSOR_FILENAME_TEMPLATE = '{{ filename }}.log'
S3_LOG_FOLDER = 's3://data-team-airflow-logs/airflow-master-tester/'
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'airflow.task': {
            'format': LOG_FORMAT,
        },
        'airflow.processor': {
            'format': LOG_FORMAT,
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'airflow.task',
            'stream': 'ext://sys.stdout'
        },
        'file.processor': {
            'class': 'airflow.utils.log.file_processor_handler.FileProcessorHandler',
            'formatter': 'airflow.processor',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
        # When using s3 or gcs, provide a customized LOGGING_CONFIG
        # in airflow_local_settings within your PYTHONPATH, see UPDATING.md
        # for details
        's3.task': {
            'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
            'formatter': 'airflow.task',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            's3_log_folder': S3_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
    },
    'loggers': {
        '': {
            'handlers': ['console'],
            'level': LOG_LEVEL
        },
        'airflow': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        'airflow.processor': {
            'handlers': ['file.processor'],
            'level': LOG_LEVEL,
            'propagate': True,
        },
        'airflow.task': {
            'handlers': ['s3.task'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        'airflow.task_runner': {
            'handlers': ['s3.task'],
            'level': LOG_LEVEL,
            'propagate': True,
        },
    }
}
I can see the logs on S3, but when I navigate to the logs in the UI all I get is:
*** Log file isn't local.
*** Fetching here: http://1eb84d89b723:8793/log/hermes_pull_double_click_click/hermes_pull_double_click_click/2018-02-26T11:22:00/1.log
*** Failed to fetch log file from worker. HTTPConnectionPool(host='1eb84d89b723', port=8793): Max retries exceeded with url: /log/hermes_pull_double_click_click/hermes_pull_double_click_click/2018-02-26T11:22:00/1.log (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7fe6940fc048>: Failed to establish a new connection: [Errno -2] Name or service not known',))
I can see in the logs that it's successfully importing the log_config.py (I included an __init__.py as well).
I can't see why it's using the FileTaskHandler here instead of the S3 one.
Any help would be great, thanks.
In my scenario it wasn't Airflow that was at fault here.
I was able to go to the Gitter channel and talk to the guys there.
After putting print statements into the Python code that was running, I was able to catch an exception on this line of code:
https://github.com/apache/incubator-airflow/blob/4ce4faaeae7a76d97defcf9a9d3304ac9d78b9bd/airflow/utils/log/s3_task_handler.py#L119
The exception was a recursion max depth issue on the SSLContext, which after looking around on the web seemed to be coming from using some combination of gevent with gunicorn.
https://github.com/gevent/gevent/issues/903
I switched this back to sync and had to change the AWS ELB listener to TCP, but after that the logs were working fine through the UI.
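For reference, a minimal sketch of that worker change, assuming it is made through the [webserver] section of airflow.cfg:
[webserver]
# use synchronous gunicorn workers instead of gevent
worker_class = sync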
Hope this helps others.