Data entered in the admin page is not reflecting on my webpage - django-templates

The data I entered in the admin page is not showing up on the web page. I don't know what is going wrong with my code; please help.
admin.py
from django.contrib import admin
from blog.models import Post

# Register your models here.
class PostAdmin(admin.ModelAdmin):
    list_display = ('title', 'slug', 'status', 'created_on')
    list_filter = ('status',)
    search_fields = ['title', 'content']
    prepopulated_fields = {'slug': ('title',)}

admin.site.register(Post, PostAdmin)
urls.py
from . import views
from django.urls import path

urlpatterns = [
    path('blogspot/', views.PostList.as_view(), name="b"),
    path('<slug:slug>/', views.PostDetail.as_view(), name="post_detail"),
]
views.py
from django.views import generic
from .models import Post

# Create your views here.
class PostList(generic.ListView):
    queryset = Post.objects.filter(status=1).order_by('-created_on')
    template_name = 'blog/index.html'

class PostDetail(generic.DetailView):
    model = Post
    template_name = 'blog/post_detail.html'
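One thing worth checking: PostList only returns posts with status=1, so if the Post model defaults new entries to a draft status (0 is the usual convention in this kind of blog setup, though that is an assumption about your model), anything saved in the admin stays hidden until its status is set to published. The template also has to iterate over the view's context variable. A minimal sketch of blog/index.html, assuming the default context name post_list:

{% for post in post_list %}
    <h2><a href="{% url 'post_detail' slug=post.slug %}">{{ post.title }}</a></h2>
    <p>{{ post.created_on }}</p>
{% empty %}
    <p>No published posts yet.</p>
{% endfor %}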


Why has my old folium app stopped showing maps? [duplicate]

I wrote a Python test program like this to show OpenStreetMap:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView
import sys

def mainPyQt5():
    url = 'file:///./index.html'
    app = QApplication(sys.argv)
    browser = QWebEngineView()
    browser.load(QUrl(url))
    browser.show()
    sys.exit(app.exec_())

mainPyQt5()
The index.html fetched by QWebEngineView simply calls OpenStreetMap:
<title>OSM and Leaflet</title>
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.css"/>
<div id="map" style="width: 900px; height: 580px"></div>
<script src="http://cdn.leafletjs.com/leaflet-0.7.3/leaflet.js"></script>
<script>
    // Creating map options
    var mapOptions = {
        center: [45.641174, 9.114828],
        zoom: 10
    }
    // Creating a map object
    var map = new L.map('map', mapOptions);
    // Creating a Layer object
    var layer = new L.TileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png');
    // Adding layer to the map
    map.addLayer(layer);
</script>
If I fetch index.html with an ordinary browser, the map is shown as expected, but if I load it with the simple Python program through QWebEngineView, no tiles are downloaded from OpenStreetMap. If I replace OpenStreetMap with maps.stamen.com, everything is fine both in a browser and in the Python script.
By default QtWebEngine does not set the default headers that popular browsers do. In this case the OpenStreetMap server needs the "Accept-Language" header to produce the maps (the names of cities, for example, depend on the language) and appears to use its absence to filter out non-browser traffic. The solution is to implement a QWebEngineUrlRequestInterceptor that adds that header:
import os.path
import sys

from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineCore import QWebEngineUrlRequestInterceptor
from PyQt5.QtWebEngineWidgets import QWebEngineView

class Interceptor(QWebEngineUrlRequestInterceptor):
    def interceptRequest(self, info):
        # Add the Accept-Language header to every outgoing request.
        info.setHttpHeader(b"Accept-Language", b"en-US,en;q=0.9,es;q=0.8,de;q=0.7")

def mainPyQt5():
    CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
    filename = os.path.join(CURRENT_DIR, "index.html")
    app = QApplication(sys.argv)
    browser = QWebEngineView()
    interceptor = Interceptor()
    browser.page().profile().setUrlRequestInterceptor(interceptor)
    browser.load(QUrl.fromLocalFile(filename))
    browser.show()
    sys.exit(app.exec_())

if __name__ == "__main__":
    mainPyQt5()
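One caveat: setUrlRequestInterceptor was added to QWebEngineProfile in Qt 5.13. On older Qt builds the same interceptor would be installed with the profile's older method instead (worth verifying against your installed Qt version):

# On Qt < 5.13, install the interceptor with the older profile-level API:
browser.page().profile().setRequestInterceptor(interceptor)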

Object relational mapping ('and_' not working)

and_ is not working. Can anyone help me out? I am new to Flask.
test.py
from flask import session, Flask, render_template, request
from models import *
import os

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)

@app.route('/')
def index():
    query = book.query.filter(and_(book.title == 'we all lost', book.publication_year == 2050)).all()
    for i in query:
        print(i.title)
    return render_template('hellotesting.html', m=query)
hellotesting.html
<html>
    <head></head>
    <body>
        <ol>
            <li>{{ m.title }}</li>
            <li>{{ m.author }}</li>
            <li>{{ m.publication_year }}</li>
        </ol>
    </body>
</html>
error
NameError: name 'and_' is not defined
I don't know why it is not working.
You need to import it before you can use it:
from sqlalchemy import and_
and then
query = book.query.filter(and_(book.title == 'we all lost', book.publication_year == 2050)).all()
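As a side note, filter() joins multiple criteria with AND on its own, so the same query also works without and_ at all:

# Multiple conditions passed to filter() are implicitly ANDed together.
query = book.query.filter(book.title == 'we all lost', book.publication_year == 2050).all()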
Also, in your hellotesting.html file you try to display attributes of the queryset (something like a list of your model instances), not of a particular object, and that will raise an exception. You have to get an object out of the queryset, in the function or in the template, before accessing its attributes.
In the function you can do something like
query = book.query.filter(and_(book.title == 'we all lost', book.publication_year == 2050)).first()
or
book = query[0]
and then pass that object to the render_template function, or you can do something similar in the template, like
{{ m[0].title }}
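And if you want to list every matching book rather than a single one, a small sketch of hellotesting.html that loops over the list passed in as m:

<ol>
    {% for b in m %}
        <li>{{ b.title }} by {{ b.author }} ({{ b.publication_year }})</li>
    {% endfor %}
</ol>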
Read the documentation for more examples and explanations.

Django generic Views with templates

I've added a new template to my project (thing_listings.html) and I've added the views:
from django.views import generic
from .models import Things

class IndexView(generic.ListView):
    template_name = 'home/index.html'

    def get_queryset(self):
        return Things.objects.all()

class ThingView(generic.ListView):
    template_name = 'home/thing_listings.html'

    def get_queryset(self):
        return Things.objects.all()

class DetailView(generic.DetailView):
    model = Things
    template_name = 'home/detail.html'
and the URLs:
from django.conf.urls import url
from . import views

app_name = 'home'

urlpatterns = [
    # /home/
    url(r'^$', views.IndexView.as_view(), name='index'),
    # /thingview/
    url(r'^$', views.ThingView.as_view(), name='thingview'),
    # /home/"details"/
    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
]
At the moment the site runs fine, except that when I click on the thing_listings link I just get directed to the index instead of where ThingView is supposed to take me. Please help, I'm not sure where I've gone wrong.
I've used the href: {% url 'home:thingview' %}
I've found the solution, in case anyone else is having the same issue.
All you should need to do is add the path to your regular expression, e.g.:
url(r'^servicesview/$', views.ServicesView.as_view(), name='services'),
I've repeated the process multiple times to make sure it works.
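Applied to the URLs in the question, that presumably means giving ThingView its own non-empty pattern so requests stop falling through to the index route:

urlpatterns = [
    # /home/
    url(r'^$', views.IndexView.as_view(), name='index'),
    # /home/thingview/ now has a distinct pattern instead of a second r'^$'
    url(r'^thingview/$', views.ThingView.as_view(), name='thingview'),
    # /home/<pk>/
    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
]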

Scrapy ends after first result

I've been looking around and can't find the answer I'm looking for. I got my crawler (Scrapy) to return results close to what I'm looking for. So what I'm trying to do now is get it to pull all of the results from the page. Currently it pulls the first one and stops. If I take off the extract_first() then it pulls all the data and groups them. So I'm looking for one of two answers that would work:
1) continue crawling results and not ending
2) ungroup each item onto a new line of results
Here is my code:
import scrapy
from scrapy.selector import Selector
from urlparse import urlparse
from urlparse import urljoin
from scrapy import Request
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
#from scrappy.http import HtmlResponse

class MySpider(CrawlSpider):
    name = "ziprecruiter"

    def start_requests(self):
        allowed_domains = ["https://www.ziprecruiter.com/"]
        urls = [
            'https://www.ziprecruiter.com/candidate/search?search=operations+manager&location=San+Francisco%2C+CA'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        for houses in response.xpath('/html/body'):
            yield {
                'Job_title:': houses.xpath('.//span[@class="just_job_title"]//text()[1]').extract_first(),
                'Company:': houses.xpath('.//a[@class="t_org_link name"]//text()[1]').extract_first(),
                'Location:': houses.xpath('.//a[@class="t_location_link location"]//text()[1]').extract_first(),
                'FT/PT:': houses.xpath('.//span[@class="data_item"]//text()[1]').extract_first(),
                'Link': houses.xpath('.//a/@href[1]').extract_first(),
                'pay': houses.xpath('./section[@class="perks_item"]/span[@class="data_item"]//text()[1]').extract_first()
            }
Thank you in advance!
EDIT:
After more research I redefined the container to crawl in, and that gives me all the right answers. Now my question is: how do I get each item on the page instead of only the first result? It just doesn't loop. Here's my code:
import scrapy
from scrapy.selector import Selector
from urlparse import urlparse
from urlparse import urljoin
from scrapy import Request
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
#from scrappy.http import HtmlResponse

class MySpider(CrawlSpider):
    name = "ziprecruiter"

    def start_requests(self):
        allowed_domains = ["https://www.ziprecruiter.com/"]
        urls = [
            'https://www.ziprecruiter.com/candidate/search?search=operations+manager&location=San+Francisco%2C+CA'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        for houses in response.xpath('/html/body/main/div/section/div/div[2]/div/div[2]/div[1]/article[1]/div[2]'):
            yield {
                'Job_title:': houses.xpath('.//span[@class="just_job_title"]//text()').extract(),
                'Company:': houses.xpath('.//a[@class="t_org_link name"]//text()').extract(),
                'Location:': houses.xpath('.//a[@class="t_location_link location"]//text()').extract(),
                'FT/PT:': houses.xpath('.//span[@class="data_item"]//text()').extract(),
                'Link': houses.xpath('.//a/@href').extract(),
                'pay': houses.xpath('./section[@class="perks_item"]/span[@class="data_item"]//text()').extract()
            }
Seems to me that you should use this XPath instead:
//div[@class="job_content"]
as that is the class of the div you're looking for. When I execute it for this page, I get 20 div elements returned. However, you might want to add some more filtering to the XPath query, just in case there are other divs with that class name that you don't want to parse.
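Putting that together, a sketch of the parse method with the suggested container XPath (the field expressions are the ones from the question, with extract_first() so each job card yields one clean item):

def parse(self, response):
    # Each job card on the page now gets its own item instead of one grouped result.
    for houses in response.xpath('//div[@class="job_content"]'):
        yield {
            'Job_title': houses.xpath('.//span[@class="just_job_title"]//text()').extract_first(),
            'Company': houses.xpath('.//a[@class="t_org_link name"]//text()').extract_first(),
            'Location': houses.xpath('.//a[@class="t_location_link location"]//text()').extract_first(),
            'FT/PT': houses.xpath('.//span[@class="data_item"]//text()').extract_first(),
            'Link': houses.xpath('.//a/@href').extract_first(),
            'pay': houses.xpath('.//section[@class="perks_item"]/span[@class="data_item"]//text()').extract_first(),
        }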

What is the right way to post an image to a REST API and gather data with the Falcon library?

I am trying to post an image and process it through my REST API. I use Falcon for the backend but could not figure out how to post and receive the data.
This is how I currently send my file:
img = open('img.png', 'rb')
r = requests.post("http://localhost:8000/rec",
                  files={'file': img},
                  data={'apikey': 'bla'})
However, the Falcon repo says that Falcon does not support HTML forms for sending data; instead it aims at the full scope of POSTed and PUTed data, and I cannot tell how POSTed image data differs from data sent as above.
So eventually, I would like to learn the right way to send an image and receive it in a REST API written with Falcon. Could you give some pointers?
For this you can use the following approach:
Falcon API Code:
import falcon
import base64
import json

class GetImage:
    def on_post(self, req, res):
        # Read the raw request body and parse it as JSON.
        json_data = json.loads(req.stream.read().decode('utf8'))
        image_url = json_data['image_name']
        base64encoded_image = json_data['image_data']
        # Decode the base64 payload and write the image to disk.
        with open(image_url, "wb") as fh:
            fh.write(base64.b64decode(base64encoded_image))
        res.status = falcon.HTTP_203
        res.body = json.dumps({'status': 1, 'message': 'success'})

app = falcon.API()
app.add_route("/rec/", GetImage())
For the API call:
import requests
import base64
import json

with open("yourfile.png", "rb") as image_file:
    # base64-encode the image and decode to str so it can be embedded in JSON.
    encoded_image = base64.b64encode(image_file.read()).decode('utf8')

# The server parses the request body as JSON, so send a JSON body rather than form data.
r = requests.post("http://localhost:8000/rec/",
                  data=json.dumps({'image_name': 'yourfile.png',
                                   'image_data': encoded_image}))
print(r.status_code, r.reason)
I hope this will help.
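For completeness, falcon.API() returns a plain WSGI application, so a minimal way to run the server for local testing (a sketch, assuming the API code above lives in the same module) is the standard library's WSGI server:

# Minimal local test server; for production use a real WSGI server such as gunicorn.
from wsgiref.simple_server import make_server

httpd = make_server('localhost', 8000, app)
httpd.serve_forever()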