raise exception.with_traceback(None) from new_cause ValueError: Expected singleton: - odoo

enter image description here
This my code and i don't know why this error happen.
#api.model
def update_prices_in_mitra(self):
self.ensure_one()
select_hargamitra = self.env['isd.res.partner.price_list'].search(
[('partner_id', '=', self.mitra_id.id)])
cek = []
# self.task_orderlines = [(5, 0, 0)]
for cek in select_hargamitra.price_list:
if (select_hargamitra and (len(select_hargamitra.price_list) > 0)):
for x in self.task_orderlines:
if cek.product_id.id == self.product_id.id:
x.write({'mitra_price': cek.price_amount,
'isd_mitra_status' : True})

Try this because there might be multiple record in a page i.e. tree view
#api.model
def update_prices_in_mitra(self):
for rec in self:
select_hargamitra = self.env['isd.res.partner.price_list'].search([('partner_id', '=', rec.mitra_id.id)])
cek = []
for cek in select_hargamitra.price_list:
if (select_hargamitra and (len(select_hargamitra.price_list) > 0)):
for x in rec.task_orderlines:
if cek.product_id.id == rec.product_id.id:
x.write({'mitra_price': cek.price_amount,
'isd_mitra_status' : True})

Related

ValueError: Expected singleton: daily.attendance.line(123,124,125

#api.model
def create(self, vals):
curr = datetime.now()
new_date = datetime.strftime(curr, '%Y-%m-%d')
cal_obj = self.env['daily.attendance'].search([])
#api.constrains('date')
def _date_test_unique(self):
for rec in self:
if self.search_count([('date', '=', rec.date)]) > 1:
raise ValidationError(_('Current Date Attendance Already Existed!'))
#api.onchange('user_id')
def onchange_department(self):
if self.user_id == True:
emps = self.env['hr.employee'].search([])
emp_attd = []
from datetime import datetime
now = datetime.now() # current date and time
check_in = now.strftime('%Y-%m-%d %H:%M:%S')
check_in_from = now.strftime('%Y-%m-%d 05:30')
check_out = now.strftime('%Y-%m-%d %H:%M:%S')
check_out_from = now.strftime('%Y-%m-%d 14:30')
for emp in emps:
vals = {
'employe_id':emp.id,
'check_in': check_in_from,
'check_out': check_out_from,
'is_present': True
}
emp_attd.append([0, 0, vals])
self.update({
'employee_ids': emp_attd,
})
else:
self.employee_ids = False
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
The error happens when Odoo tries to get employee_ids value from a record set but it expects a record.
for emp in self.employee_ids:
You need to loop over self then access employee_ids field value for each record:
Example:
def attendance_validate(self):
for rec in self:
for emp in rec.employee_ids:
You should move the following code outside the for loop
self.write({'state': 'validate', })
Example:
hr_attendance = self.env['hr.attendance']
for rec in self:
for emp in rec.employee_ids:
if emp.is_present == True:
attd_crete_id = hr_attendance .create({'employee_id': emp.employe_id.id,
'check_in': emp.check_in,
'check_out': emp.check_out,
})
rec.write({
'state': 'validate',
})
...
Probably you need to call write state to validate when the attendance_validate method succeed (at the end)
improvement:
The following expression
if emp.is_present == True:
can be simplified to:
if emp.is_present:
You are using two fields is_present and is_absent, you can simply use is_present and when its value is False (not set) the employee is absent.
You need to remove the second if statement, which is useless
elif emp.is_absent == True:
if emp.is_absent == True:
Avoid raising a validation error in the create method because it will break the workflow, instead you can define a constraint on date field:
#api.constrains('date')
def _date_test_unique(self):
for rec in self:
if self.search_count([('date', '=', rec.date)]) > 1:
raise ValidationError(_('Current Date Attendance Already Existed!'))
Update:
The create method should return the newly created record:
#api.model
def create(self, vals):
res = super(CLASS_NAME, self).create(vals)
# Your code
return res
write a for loop before the if statement
#api.onchange('is_present')
def onchange_attendance(self):
for rec in self:
if rec.is_present:
rec.is_absent = False

Change Many2one domain based on selection field

I want to change M2O field domain based on the user selection from selection field:
#api.onchange('search_by')
def _get_partner(self):
partners = self.env['customer.regist'].search([('name','!=','New')])
partner_list = []
partner_list2 = []
for rec in partners:
partner_list.append(rec.name)
partner_list2.append(rec.phone)
res = {}
if self.search_by == 'code':
res['domain'] = {'search_value': [('name', 'in', partner_list)]}
if self.search_by == 'phone':
res['domain'] = {'search_value': [('phone', 'in', partner_list2)]}
return res
but the domain not change and get the default domain from model
Change your function like this.
#api.onchange('search_by')
def _get_partner(self):
partners = self.env['customer.regist'].search([('name','!=','New')])
partner_list = []
partner_list2 = []
for rec in partners:
partner_list.append(rec.name)
partner_list2.append(rec.phone)
res = {}
if self.search_by == 'code':
domain = {'search_value': [('name', 'in', partner_list)]}
return {'domain': domain}
if self.search_by == 'phone':
domain = {'search_value': [('phone', 'in', partner_list2)]}
return {'domain': domain}
Try doing this :
#api.onchange('search_by')
def _get_partner(self):
partners = self.env['customer.regist'].search([('name','!=','New')])
partner_list = []
partner_list2 = []
for rec in partners:
if rec.name :
partner_list.append(rec.name)
if rec.phone:
partner_list2.append(rec.phone)
res = {}
if self.search_by == 'code':
res['domain'] = {'search_value': [('name', 'in', partner_list)]}
if self.search_by == 'phone':
res['domain'] = {'search_value': [('phone', 'in', partner_list2)]}
return res
Make sure that the 'False' values are never added to the domain, since it might give invalid results

Scrapy - issues with 'dont_filter' option for Requests

I must include the option dont_filter=True into each request of my spider, I've already used this option but I don't know why this time I get this error:
Unhandled Error
Traceback (most recent call last):
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\commands\crawl.py", line 58, in run
self.crawler_process.start()
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\crawler.py", line 293, in start
reactor.run(installSignalHandlers=False) # blocking call
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\twisted\internet\base.py", line 1283, in run
self.mainLoop()
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\twisted\internet\base.py", line 1292, in mainLoop
self.runUntilCurrent()
--- <exception caught here> ---
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\twisted\internet\base.py", line 913, in runUntilCurrent
call.func(*call.args, **call.kw)
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\utils\reactor.py", line 41, in __call__
return self._func(*self._a, **self._kw)
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\core\engine.py", line 135, in _next_request
self.crawl(request, spider)
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\core\engine.py", line 210, in crawl
self.schedule(request, spider)
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\core\engine.py", line 216, in schedule
if not self.slot.scheduler.enqueue_request(request):
File "C:\Users\coppe\Anaconda3\envs\scrapyEnv\lib\site-packages\scrapy\core\scheduler.py", line 54, in enqueue_request
if not request.dont_filter and self.df.request_seen(request):
builtins.AttributeError: 'dict' object has no attribute 'dont_filter'
Here is my Spider (sorry it is quite big):
class communes_spider(scrapy.Spider):
name = "corrections"
firstSearchDate = datetime(2019, 8, 4)
crawlDate = firstSearchDate - timedelta(days=31)
path = 'D:/Données/Drivy/'
JSON = []
custom_settings = {
'ROBOTSTXT_OBEY' : True,
'DOWNLOAD_DELAY' : 6,
'CONCURRENT_REQUESTS' : 1,
'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
'AUTOTHROTTLE_ENABLED' : True,
'AUTOTHROTTLE_START_DELAY' : 6,
'LOG_STDOUT' : True,
'LOG_FILE' : 'D:/Données/Drivy/' + str(datetime.date(firstSearchDate)) + '_bis' '/' + 'log_' + str(datetime.date(crawlDate)) + '_' + str(datetime.date(firstSearchDate)) + '.txt',
'FEED_FORMAT': 'json',
'FEED_URI': 'file:///D:/Données/Drivy/' + str(datetime.date(firstSearchDate)) + '_bis' + '/' + str(datetime.date(crawlDate)) + '_' + str(datetime.date(firstSearchDate)) + '.json',
}
start_urls = "https://fr.be.getaround.com/car_models/estimated_earnings?utf8=%E2%9C%93&car_model_estimation%5Bcar_brand_id%5D={}&car_model_estimation%5Bcar_model_id%5D={}&car_model_estimation%5Brelease_year%5D={}&car_model_estimation%5Bmileage%5D={}&car_model_estimation%5Blatitude%5D={}&car_model_estimation%5Blongitude%5D={}&car_model_estimation%5Bregistration_country%5D=BE&car_model_estimation%5Bwith_open_landing_multiplier%5D={}"
def start_requests(self):
with open('C:/Users/coppe/drivy/carBrands.json') as json_file:
brands = json.load(json_file)
with open(self.path + str(datetime.date(self.firstSearchDate)) + '/' + str(datetime.date(self.crawlDate)) + '_' + str(datetime.date(self.firstSearchDate)) + '.json') as json_file:
cars = json.load(json_file)
for car in cars:
if car['carBrand'] == "Citroën":
car['carBrand'] = car['carBrand'].replace('ë','e')
if car['carBrandID'] == 'other' or car['carModelID'] == 'other':
for brand in brands:
if car['carBrand'].lower() == brand['brandName'].lower():
car['carBrandID'] = brand['brandID']
for model in brand['models']:
if car['carModel'].lower() == model['modelNameFrench'].lower() or car['carModel'].lower() == model['modelNameDutch'].lower():
car['carModelID'] = model['modelID']
else:
pass
else:
pass
if car['mileageCode']=='6':
url = self.start_urls.format(car['carBrandID'],car['carModelID'],car['immatricYear'],5,car['carLat'],car['carLong'],car['open'])
else:
url = self.start_urls.format(car['carBrandID'],car['carModelID'],car['immatricYear'],car['mileageCode'],car['carLat'],car['carLong'],car['open'])
yield scrapy.Request(
url=url,
callback=self.parse_sugPrice,
meta={'car':car},
dont_filter=True,
)
elif datetime.date(datetime.strptime(car['crawlDate'],'%Y-%m-%d')).year == 2020:
if car['mileageCode']=='6':
url = self.start_urls.format(car['carBrandID'],car['carModelID'],car['immatricYear'],5,car['carLat'],car['carLong'],car['open'])
else:
url = self.start_urls.format(car['carBrandID'],car['carModelID'],car['immatricYear'],car['mileageCode'],car['carLat'],car['carLong'],car['open'])
yield scrapy.Request(
url=url,
callback=self.parse_sugPrice,
meta={'car':car},
dont_filter=True,
)
else:
yield car
def parse_sugPrice(self, response):
data = json.loads(response.text, encoding="utf8")
selector = Selector(data['html'])
eligibleObj = json.loads(selector.css('a::attr(data-estimated-earnings)').get())
openEligible = response.meta['car']['openEligible']
if response.meta['car']['carBrandID'] == 'other' or response.meta['car']['carModelID'] == 'other':
response.meta['car']['suggestedPrice'] = -1 # No estimation available
else:
if response.meta['car']['open'] == False and openEligible == True:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
correctedEstimEarnings = estimEarnings/1.25
response.meta['car']['suggestedPrice'] = correctedEstimEarnings/20 # Suggested price based current earnings condition on open decision (open is true or false)
elif response.meta['car']['open'] == True and openEligible == False:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
correctedEstimEarnings = estimEarnings*1.25
response.meta['car']['suggestedPrice'] = correctedEstimEarnings/15 # Suggested price based current earnings condition on open decision (open is true or false)
elif response.meta['car']['open'] == True and openEligible == True:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
response.meta['car']['suggestedPrice'] = estimEarnings/20 # Suggested price based current earnings condition on open decision (open is true or false)
else:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
response.meta['car']['suggestedPrice'] = estimEarnings/15 # Suggested price based current earnings condition on open decision (open is true or false)
if response.meta['car']['numEvalCar'] > 0:
if response.meta['car']['firstReviewYear'] != datetime.now().year:
estimMembership = datetime.now().year - response.meta['car']['firstReviewYear'] # in years
correctedYear = response.meta['car']['immatricYear'] + estimMembership
if response.meta['car']['mileageCode']=='6':
suggestedPriceLink = "https://fr.be.getaround.com/car_models/estimated_earnings?utf8=%E2%9C%93&car_model_estimation%5Bcar_brand_id%5D={}&car_model_estimation%5Bcar_model_id%5D={}&car_model_estimation%5Brelease_year%5D={}&car_model_estimation%5Bmileage%5D={}&car_model_estimation%5Blatitude%5D={}&car_model_estimation%5Blongitude%5D={}&car_model_estimation%5Bregistration_country%5D=BE&car_model_estimation%5Bwith_open_landing_multiplier%5D={}".format(response.meta['car']['carBrandID'],response.meta['car']['carModelID'],correctedYear,'5',response.meta['car']['carLat'],response.meta['car']['carLong'],response.meta['car']['open'])
else:
suggestedPriceLink = "https://fr.be.getaround.com/car_models/estimated_earnings?utf8=%E2%9C%93&car_model_estimation%5Bcar_brand_id%5D={}&car_model_estimation%5Bcar_model_id%5D={}&car_model_estimation%5Brelease_year%5D={}&car_model_estimation%5Bmileage%5D={}&car_model_estimation%5Blatitude%5D={}&car_model_estimation%5Blongitude%5D={}&car_model_estimation%5Bregistration_country%5D=BE&car_model_estimation%5Bwith_open_landing_multiplier%5D={}".format(response.meta['car']['carBrandID'],response.meta['car']['carModelID'],correctedYear,response.meta['car']['mileageCode'],response.meta['car']['carLat'],response.meta['car']['carLong'],response.meta['car']['open'])
yield scrapy.Request(
url=suggestedPriceLink,
callback=self.parse_correctSugPrice,
meta={'car':response.meta['car']},
dont_filter=True,
)
else:
yield response.meta['car']
else:
yield response.meta['car']
def parse_correctSugPrice(self, response):
data = json.loads(response.text, encoding="utf8")
selector = Selector(data['html'])
eligibleObj = json.loads(selector.css('a::attr(data-estimated-earnings)').get())
openEligible = response.meta['car']['openEligible']
if response.meta['car']['carBrandID'] == 'other' or response.meta['car']['carModelID'] == 'other':
response.meta['car']['correctSuggestedPrice'] = -1 # No estimation available
else:
if response.meta['car']['open'] == False and openEligible == True:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
correctedEstimEarnings = estimEarnings/1.25
response.meta['car']['correctSuggestedPrice'] = correctedEstimEarnings/20 # Suggested price based corrected earnings condition on open decision (open is true or false) and that this decision was the same at firstReviewYear.
elif response.meta['car']['open'] == True and openEligible == False:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
correctedEstimEarnings = estimEarnings*1.25
response.meta['car']['correctSuggestedPrice'] = correctedEstimEarnings/15 # Suggested price based corrected earnings condition on open decision (open is true or false) and that this decision was the same at firstReviewYear.
elif response.meta['car']['open'] == True and openEligible == True:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
response.meta['car']['correctSuggestedPrice'] = estimEarnings/20 # Suggested price based corrected earnings condition on open decision (open is true or false) and that this decision was the same at firstReviewYear.
else:
estimEarnings = int(re.sub("\D",'',selector.css('span.car_model_estimation_result_amount::text').get()))
response.meta['car']['correctSuggestedPrice'] = estimEarnings/15 # Suggested price based corrected earnings condition on open decision (open is true or false) and that this decision was the same at firstReviewYear.
yield response.meta['car']
Did I miss something ?
dont_filter=True should be inside scrapy.Request meta dict:
meta = {'dont_filter': True , 'car':...}

Add lines to stock picking move lines

In my method, I delete lines from stock.pickings and want to add different lines from my model. but i get an error AttributeError: 'stock.move' object has no attribute 'get'
#api.multi
def _action_procurement_create(self):
res = super(SaleOrderLine, self)._action_procurement_create()
order_line_bom = self.env['sale.order.line.bom'].search([('sale_order_line_id', '=', self.id )])
stock_move_lines = self.env['stock.move']
created_stock_move_lines = self.env['stock.move']
vals = {}
for order in self.order_id:
if self.product_id.bom_ids:
order.picking_ids.move_lines.state = 'draft'
for move_line in order.picking_ids.move_lines:
move_line.unlink()
for bom_line in order_line_bom:
vals['product_id'] = bom_line.product_id.id,
vals['product_uom'] = 1,
vals['location_id'] = 1,
vals['name'] = bom_line.product_id.name,
vals['location_dest_id'] = 1,
created_stock_move_lines += stock_move_lines.create(vals)
order.create(stock_move_lines)
You have defined:
stock_move_lines = self.env['stock.move']
Then you try to pass it to create method:
order.create(stock_move_lines)
As documented in model.py
:param dict vals:
values for the model's fields, as a dictionary::
{'field_name': field_value, ...}
see :meth:`~.write` for details
Please try may it's help you :
#api.multi
def _action_procurement_create(self):
res = super(SaleOrderLine, self)._action_procurement_create()
order_line_bom = self.env['sale.order.line.bom'].search([('sale_order_line_id', '=', self.id )])
stock_move_lines = self.env['stock.move']
created_stock_move_lines = self.env['stock.move']
vals = {}
for order in self.order_id:
if self.product_id.bom_ids:
order.picking_ids.move_lines.state = 'draft'
for move_line in order.picking_ids.move_lines:
move_line.unlink()
for bom_line in order_line_bom:
vals = {
'product_id': bom_line.product_id.id,
'product_uom': 1,
'location_id': 1,
'name': bom_line.product_id.name,
'location_dest_id': 1,
}
created_stock_move_lines += stock_move_lines.create(vals)
order.create(stock_move_lines)

Hide field form fields_view_get

def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
#override of fields_view_get in order to change the label of the process button and the separator accordingly to the shipping type
if context is None: context={}
res = super(stock_partial_picking, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) type = context.get('default_type', False)
if type: doc = etree.XML(res['arch'])
for node in doc.xpath("//button[#name='do_partial']"):
if type == 'in':
node.set('string', _('_Receive'))
elif type == 'out':
node.set('string', _('_Deliver'))
i want to hide qty,expire_date field
for node in doc.xpath("//separator[#name='product_separator']"):
if type == 'in':
node.set('string', _('Receive Products'))
elif type == 'out':
node.set('string', _('Deliver Products'))
res['arch'] = etree.tostring(doc)
return res
Set the "invisible" = "1",
for node in doc.xpath("//separator[#name='product_separator']"):
if type == 'in':
node.set('string', _('Receive Products'))
node.set('invisible', '1')
elif type == 'out':
node.set('string', _('Deliver Products'))
res['arch'] = etree.tostring(doc)
return res
You can hide a filed in a similar mannar as xml view, but you must use modify attribute like attrs.
Anyway the result let it be like this:
<field name="fieldname" modifiers={'invisible': True} />
(In my experience, if I want to take a condition to visibility, not working in fields_view_get)
Try this:
To hide the field from form view: using fields_view_get function
doc = etree.XML(res['arch'])
if show_hp_status:
for node in doc.xpath("//field[#name='status_1']"):
#use either 'invisible' or node.remove
node.set('invisible', '1')
doc.remove(node)
else:
for node in doc.xpath("//field[#name='status_2']"):
#use either 'invisible' or node.remove
node.set('invisible', '1')
doc.remove(node)
res['arch'] = etree.tostring(doc)