Flask - How can I use my functions on the URL - flask-sqlalchemy

I'm starting to learn Flask and maybe I'm just using the wrong words to search, but here's the problem.
I have this class:
class Video(Resource):
    @marshal_with(resource_fields)
    def get(self, video_id):
        result = VideoModel.query.filter_by(id=video_id).first()
        if not result:
            abort(404, message="Could not find video with that id")
        return result

    @marshal_with(resource_fields)
    def put(self, video_id):
        args = video_put_args.parse_args()
        result = VideoModel.query.filter_by(id=video_id).first()
        if result:
            abort(409, message="Video id taken...")
        video = VideoModel(id=video_id, name=args['name'], views=args['views'], likes=args['likes'])
        db.session.add(video)
        db.session.commit()
        return video, 201

    @marshal_with(resource_fields)
    def patch(self, video_id):
        args = video_update_args.parse_args()
        result = VideoModel.query.filter_by(id=video_id).first()
        if not result:
            abort(404, message="Video doesn't exist, cannot update")
        if args['name']:
            result.name = args['name']
        if args['views']:
            result.views = args['views']
        if args['likes']:
            result.likes = args['likes']
        db.session.commit()
        return result
And I'm trying to make it so that when I have a URL like http://127.0.0.1/5000/video/put/1/Test/80/2, I can call the function just by typing it into the URL. Is there any way to do this? Thanks in advance!
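For reference, a Flask-RESTful resource like this is normally registered against a route that captures the id from the path, and the HTTP method (GET/PUT/PATCH) decides which function runs; the remaining values are sent in the request body rather than in the URL. A minimal sketch, assuming the usual Api/add_resource setup that isn't shown in the question:

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

# Only video_id comes from the path; name/views/likes are read from the
# request body by video_put_args.parse_args() in the put() method above.
api.add_resource(Video, "/video/<int:video_id>")

# Hypothetical client call (a browser address bar can only issue GET requests):
#   import requests
#   requests.put("http://127.0.0.1:5000/video/1",
#                json={"name": "Test", "views": 80, "likes": 2})

if __name__ == "__main__":
    app.run(debug=True)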


How do I connect items from one parse method to another?

'''
import scrapy
from ..items import GooddealItem

class FarmtoolsSpider(scrapy.Spider):
    name = 'farmtools'
    allowed_domains = ['www.gooddeal.com']
    start_urls = ['https://www.gooddeal.com/all?source=private&sort=publishdate%20desc']

    def parse(self, response):
        items = GooddealItem()
        rows = response.xpath('//ul[@class="card-collection"]/li')
        for row in rows:
            link = row.xpath('.//a/@href').get()  # this is the full link
            link_split = link.split('/')[-1]      # this splits the url link the first time
            linkid = link_split.split('?')[0]     # this splits it the second time
            title = row.xpath('.//div[1]/p[@class="card__body-title"]/text()').get()
            county = row.xpath('.//a/div/div[2]/div[1]/ul[@class="card__body-keyinfo"]/li[contains(text(),"min")]/following-sibling::node()/text()').get()
            price = row.xpath('.//p[@class="card__price"]/span[1]/text()').get()
            subcat = row.xpath('.//a/div/div[2]/div[1]/p[2]/text()[2]').get()
            zero = row.xpath('.//a/div/div[2]/div[1]/ul[@class="card__body-keyinfo"]/li[contains(text(),"min")]/text()').get()
            if zero == '0 min':
                items['linkid'] = linkid
                items['title'] = title
                items['county'] = county
                items['price'] = price
                items['subcat'] = subcat
                items['zero'] = zero
                items['link'] = link
                yield response.follow(url = link, callback=self.parse_item_page)

    def parse_item_page(self, response):
        items = GooddealItem()
        rows = response.xpath('/html/body[1]')
        for row in rows:
            # Note: the trailing commas below wrap each value in a 1-tuple,
            # which is why the scraped output shows values like (None,).
            category = row.xpath('.//main/div/div[1]/div/div[1]/div/nav/span/a[1]/span/text()').get(),
            views = row.xpath('.//main/div/div[1]/div/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/div/div/span[2]/text()').get(),
            seller_id = row.xpath('.//main/div/div[1]/div/div[2]/div[2]/div[2]/div[3]/div/div[1]/div[1]/div[2]/a/@href').get(),
            seller_ads = row.xpath('.//main/div/div[1]/div/div[2]/div[2]/div[2]/div[3]/div/div[2]/div/dl[3]/dd/text()').get(),
            lifetime_ads = row.xpath('//main/div/div[1]/div/div[2]/div[2]/div[2]/div[3]/div/div[2]/div/dl[4]/dd/text()').get()
            items['category'] = category
            items['views'] = views
            items['seller_id'] = seller_id
            items['seller_ads'] = seller_ads
            items['lifetime_ads'] = lifetime_ads
            yield items
'''
I'm stuck on this as it's my first attempt. When I run the code I'm just getting back:
2020-07-12 22:53:21 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.gooddeal.com/dogs-for-sale/dachshunds/25348559>
{'category': (None,),
'lifetime_ads': None,
'seller_ads': (None,),
'seller_id': (None,),
'views': (None,)}
Any help will be appreciated, thanks
I'm assuming you want the data scraped in the parse method to be joined with the data scraped in parse_item_page.
If you are using Scrapy v1.7+ you can use cb_kwargs when building the request.
This parameter receives a dict of arbitrary data that will be passed as keyword arguments to the callback function. So you would have to do something like this in your request:
...
yield response.follow(url = link, callback=self.parse_item_page, cb_kwargs={'scraped_item': items})
For this to work, you also need to change the callback function to receive this parameter. Like this:
def parse_item_page(self, response, scraped_item):
    ...
Scrapy will take care of passing scraped_item when it calls parse_item_page.
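To make this concrete, here is a minimal sketch of how parse_item_page could fill in the remaining fields on the item it received via cb_kwargs and yield the combined result (the field names come from the question; the XPaths are shortened placeholders):

def parse_item_page(self, response, scraped_item):
    # scraped_item already carries linkid, title, county, price, subcat,
    # zero and link from parse(); add the item-page fields and yield it.
    scraped_item['category'] = response.xpath('//nav/span/a[1]/span/text()').get()
    scraped_item['views'] = response.xpath('//span[@class="views"]/text()').get()
    yield scraped_item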
If you are using Scrapy v1.6 or older:
You will need to use the meta parameter. This method still works in more recent versions, but cb_kwargs (the solution above) is preferable.
When building the request you will use the meta parameter to include some arbitrary data in the request. The data will be accessible in the response object that the callback function receives. Your request should look like this:
...
yield response.follow(url = link, callback=self.parse_item_page, meta={'scraped_item': items})
In this case you will access the data by calling response.meta:
def parse_item_page(self, response):
    items = response.meta.get('scraped_item')  # response.meta is a dict
    ...

Is it possible to define variables if a condition is satisfied in karate framework?

E.g. if insurance = true (in the JSON response), then def the variables (insurance_img_cancel, insurance_img_refund, insurance_details, etc.) from the API response (they will be used in the next API in the chain); else throw the message "Insurance unavailable".
Given url postEligibility
And def eligibilityRequestBody = read('eligibilityCPReq.json')
And request eligibilityRequestBody
And print eligibilityRequestBody
And def cookie = read('cookie.txt')
And header cookie = cookie
When method Post
Then status 200
And print response
And match response.data.insuranceAvailable == 'true'
And def insurance_img_cancel = response.data.img_cancel
And def insurance_img_refund = response.data.img_refund
And def insurance_details = response.data.insurance_details
And def insurance_end_date = response.data.insurance_end_date
And def insurance_id = response.data.insurance_details[0].insurance_id
And def insurance_premium = response.data.insurance_details[0].premium
And def insurance_type = response.data.insurance_type
And def insuranceAvailable = response.data.insuranceAvailable
Isn't that match sufficient:
And match response.data.insuranceAvailable == 'true'
So if it is not true, it will fail the test. That's what you want, right?
Otherwise, please simplify the question: https://stackoverflow.com/help/minimal-reproducible-example
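If the intent really is to define the variables only when the flag is true and fail with a custom message otherwise, here is a minimal sketch using Karate's embedded JavaScript (assuming the same response shape as in the scenario above):

* if (response.data.insuranceAvailable != 'true') karate.fail('Insurance unavailable')
* def insurance_details = response.data.insurance_details
* def insurance_id = insurance_details[0].insurance_id
* def insurance_premium = insurance_details[0].premium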

Can't handle a response error on django-rest-framework

I am trying to return a 400 Bad Request response when a user who already has a company tries to create another one through my API.
I tried returning a Response, but it does not work; it seems that it never enters the if.
class CompanyViewSet(generics.ListCreateAPIView):
    serializer_class = CompanySerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        return UserCompany.objects.filter(owner=self.request.user.id)

    def perform_create(self, serializer):
        queryset = UserCompany.objects.filter(owner=self.request.user.id)
        if queryset.exists():
            content = {'API response error:': 'Can have only a one company for every user'}
            return Response(content, status=status.HTTP_400_BAD_REQUEST)
        else:
            serializer.save(owner=self.request.user)
When I create a company for a user that already has one, it returns status 200 but doesn't create it; I expected it to return status 400.
That's because perform_create only saves the serializer; its return value is ignored. If you want to control the response status, you should override create instead. Try it like this:
def create(self, request, *args, **kwargs):
    queryset = UserCompany.objects.filter(owner=self.request.user.id)
    if queryset.exists():
        content = {'API response error:': 'Can have only a one company for every user'}
        return Response(content, status=status.HTTP_400_BAD_REQUEST)
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    self.perform_create(serializer)
    headers = self.get_success_headers(serializer.data)
    return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
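An alternative, if you would rather keep the check inside perform_create, is to raise a ValidationError instead of returning a Response; DRF turns the exception into a 400 response. A minimal sketch under that assumption:

from rest_framework.exceptions import ValidationError

def perform_create(self, serializer):
    if UserCompany.objects.filter(owner=self.request.user.id).exists():
        # Raising (rather than returning) is what produces the 400 response.
        raise ValidationError({'detail': 'Can have only one company per user'})
    serializer.save(owner=self.request.user)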

Telegram bot: How to get chosen inline result

I'm sending InlineQueryResultArticle to clients and I'm wondering how to get the chosen result and its data (like result_id, ...).
Here is the code that sends the results:
token = 'Bot token'
bot = telegram.Bot(token)
updater = Updater(token)
dispatcher = updater.dispatcher

def get_inline_results(bot, update):
    query = update.inline_query.query
    results = list()
    results.append(InlineQueryResultArticle(id='1000',
                                            title="Book 1",
                                            description='Description of this book, author ...',
                                            thumb_url='https://fakeimg.pl/100/?text=book%201',
                                            input_message_content=InputTextMessageContent('chosen book:')))
    results.append(InlineQueryResultArticle(id='1001',
                                            title="Book 2",
                                            description='Description of the book, author...',
                                            thumb_url='https://fakeimg.pl/300/?text=book%202',
                                            input_message_content=InputTextMessageContent('chosen book:')))
    update.inline_query.answer(results)

inline_query_handler = InlineQueryHandler(get_inline_results)
dispatcher.add_handler(inline_query_handler)
I'm looking for a method like on_inline_chosen(data) to get the id of the chosen item (1000 or 1001 for the snippet above) and then send the appropriate response to the user.
You should send /setinlinefeedback to @BotFather, then you will get this update.
OK, I got my answer from here.
Handling the user's chosen result:
from telegram.ext import ChosenInlineResultHandler

def on_result_chosen(bot, update):
    print(update.to_dict())
    result = update.chosen_inline_result
    result_id = result.result_id
    query = result.query
    user = result.from_user.id
    print(result_id)
    print(user)
    print(query)
    print(result.inline_message_id)
    bot.send_message(user, text='fetching book data with id:' + result_id)

result_chosen_handler = ChosenInlineResultHandler(on_result_chosen)
dispatcher.add_handler(result_chosen_handler)
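For completeness, a minimal sketch of how the bot would be started once both handlers are registered (assuming the updater and dispatcher objects from the question and the pre-v12 python-telegram-bot handler signature used above):

# Start long polling so inline queries and chosen-result updates are received.
updater.start_polling()
updater.idle()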

Scrapy Splash won't execute lua script

I have run across an issue in which my Lua script refuses to execute. The returned response from the SplashRequest call seems to be an HTML body, while I'm expecting a document title. I am assuming that the Lua script is never being called, as it seems to have no apparent effect on the response. I have dug a lot through the documentation and can't quite figure out what is missing here. Does anyone have any suggestions?
from urlparse import urljoin

import scrapy
from scrapy_splash import SplashRequest

GOOGLE_BASE_URL = 'https://www.google.com/'
GOOGLE_QUERY_PARAMETERS = '#q={query}'
GOOGLE_SEARCH_URL = urljoin(GOOGLE_BASE_URL, GOOGLE_QUERY_PARAMETERS)
GOOGLE_SEARCH_QUERY = 'example search query'

LUA_SCRIPT = """
function main(splash)
    assert(splash:go(splash.args.url))
    return splash:evaljs("document.title")
end
"""

SCRAPY_CRAWLER_NAME = 'google_crawler'
SCRAPY_SPLASH_ENDPOINT = 'render.html'
SCRAPY_ARGS = {
    'lua_source': LUA_SCRIPT
}

def get_search_url(query):
    return GOOGLE_SEARCH_URL.format(query=query)

class GoogleCrawler(scrapy.Spider):
    name = SCRAPY_CRAWLER_NAME
    search_url = get_search_url(GOOGLE_SEARCH_QUERY)

    def start_requests(self):
        response = SplashRequest(self.search_url,
                                 self.parse, endpoint=SCRAPY_SPLASH_ENDPOINT, args=SCRAPY_ARGS)
        yield response

    def parse(self, response):
        doc_title = response.body_as_unicode()
        print doc_title
'endpoint' argument of SplashRequest must be 'execute' in order to execute a Lua script; it is 'render.html' in the example.
LUA_SCRIPT = """
function main(splash)
assert(splash:go(splash.args.url))
return title = splash:evaljs("document.title")
end
"""
def start_requests(self):
SplashRequest(self.search_url,self.parse, endpoint='execute',args=SCRAPY_ARGS)
You can recover the value with response.data['title']
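Putting it together, a minimal sketch of the adjusted callback under these assumptions (the 'execute' endpoint and the table-returning Lua script above):

def parse(self, response):
    # With endpoint='execute', the Lua return value is exposed on response.data.
    doc_title = response.data['title']
    self.logger.info('Page title: %s', doc_title)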