Web scraping several URLs into a pandas DataFrame - pandas

Need some help appending several web scraping results to a pandas DataFrame.
Currently I'm only getting the output from one of the URLs into the DataFrame.
I left out the URLs; if you need them I will supply them.
##libs
import bs4
import requests
import re
from time import sleep
import pandas as pd
from bs4 import BeautifulSoup as bs
##webscraping targets
URLs = ["URL1","URL2","URL3"]
## Get columns
column_list = []
r1 = requests.get(URLs[0])
soup1 = bs(r1.content, 'html.parser')
data1 = soup1.find_all('dl', attrs={"class": "border XSText rightAlignText noMarginTop highlightOnHover thickBorderBottom noTopBorder"})
columns = soup1.find_all('dt')
for col in columns:
    column_list.append(col.text.strip())  # strip() removes extra whitespace from the text
##Get values
value_list = []
for url in URLs:
    r1 = requests.get(url)
    soup1 = bs(r1.content, 'html.parser')
    data1 = soup1.find_all('dl', attrs={"class": "border XSText rightAlignText noMarginTop highlightOnHover thickBorderBottom noTopBorder"})
    values = soup1.find_all('dd')
    for val in values:
        value_list.append(val.text.strip())
df = pd.DataFrame(list(zip(column_list, value_list)))
df.transpose()
Current output, only showing the results of one URL:
Expected output:

The problem here is with the zip function: it only zips values up to the length of the shortest list, in this case column_list, leaving all the other values unused.
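For example, with two hypothetical lists of mismatched lengths:
cols = ['a', 'b', 'c']              # 3 column names
vals = [1, 2, 3, 4, 5, 6]           # 6 values scraped from two pages
print(list(zip(cols, vals)))        # [('a', 1), ('b', 2), ('c', 3)]; 4, 5 and 6 are dropped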
If you want to append the other values to the DataFrame as well, you will have to iterate over them. So change the last two lines of your code to this and it should work:
result = [[i] for i in column_list]
for i, a in enumerate(value_list):
    result[i % len(column_list)].append(a)  # distribute values round-robin across the columns
df = pd.DataFrame(result)
df.transpose()  # note: transpose() returns a new frame; assign it if you need it later
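Alternatively, a minimal sketch (assuming every page lists exactly one dd value per column, in the same order) that collects one row per URL and builds the frame in a single call:
rows = []
for url in URLs:
    soup = bs(requests.get(url).content, 'html.parser')
    rows.append([dd.text.strip() for dd in soup.find_all('dd')])  # one row of values per page
df = pd.DataFrame(rows, columns=column_list)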

Related

Copy/assign Pandas dataframes based on their names in a for loop

I am relatively new to Python, and I am struggling to do the following:
I have a set of data frames with sequential names (df_i), which I want to access in a for loop by name (via a string). How can I do that? e.g.
df_1 = pd.read_csv('...')
df_2 = pd.read_csv('...')
df_3 = pd.read_csv('...')
....
n_df = 3
for i in range(n_df):
    df_namestr = 'df_' + str(i + 1)
    # ---------------------
    df_temp = df_namestr
    # ---------------------
    # Operate with df_temp. For i+1 = 1, df_temp should be df_1
You can try something like this:
for n in range(1, n_df + 1):
    df_namestr = f"df_{n}"
    df_tmp = locals().get(df_namestr)  # look the name up in the local namespace
    if not isinstance(df_tmp, pd.DataFrame):
        continue
    print(df_namestr)
    print(df_tmp)
Refer to the documentation of locals() to know more.
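That said, looking variables up by name is fragile. A common alternative is to keep the frames in a dict keyed by name; a minimal sketch (the file names here are hypothetical):
dfs = {f"df_{n}": pd.read_csv(f"file_{n}.csv") for n in range(1, n_df + 1)}  # hypothetical file names
for name, frame in dfs.items():
    print(name)
    print(frame)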
Would it be better to approach accessing multiple dataframes by reading them into a list?
You could put all the required CSV files in a subfolder and read them all in. Then they are in a list and you can access each one as an item in that list.
Example:
import pandas as pd
import glob
path = r'/Users/myUsername/Documents/subFolder'
csv_files = glob.glob(path + "/*.csv")
dfs = []
for filename in csv_files:
    df = pd.read_csv(filename)
    dfs.append(df)
print(len(dfs))
print(dfs[1].head())  # head of the second file read (list indices start at 0)
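If you eventually need all of them as one table, the list can then be combined in a single call (a sketch; this assumes the files share the same columns):
combined = pd.concat(dfs, ignore_index=True)  # stack the frames vertically
print(combined.shape)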

Why is my code giving me data in one column when it should be in two different columns?

I need to know what is happening in my code. It should give the data in separate columns, but it is giving me the same data in both columns.
I tried changing the value of the row variable, but I couldn't find the reason.
import requests
import csv
from bs4 import BeautifulSoup
import pandas as pd
import time
arrayofRequest = []
prices = []
location = []
columns = ['Price', 'Location']
df = pd.DataFrame(columns=columns)
for i in range(0, 50):
    arrayofRequest.append("https://www.zameen.com/Homes/Karachi-2-" + str(i+1) + ".html?gclid=Cj0KCQjw3JXtBRC8ARIsAEBHg4mj4jX1zZUt3WzGScjH6nfwzrEqkuILarcmg372imSneelSXPj0fGIaArNeEALw_wcB")
    request = requests.get(arrayofRequest[i])
    soupobj = BeautifulSoup(request.content, "lxml")
    # print(soupobj.prettify())
    links = soupobj.find_all('span', {'class': 'f343d9ce'})
    addresses = soupobj.find_all('div', {'class': '_162e6469'})
    price = ""
    for i in range(0, len(links)):
        price = str(links[i]).split(">")
        price = price[len(price)-2].split("<")[0]
        prices.append(price)
        address = str(addresses[i]).split(">")
        address = address[len(address)-2].split("<")[0]
        location.append(address)
        row = location[i] + "," + prices[i]
        df = df.append(pd.Series(row, index=columns), ignore_index=False)
        # filewriter = csv.writer(csvfile, delimiter=',',filewriter.writerow(['Price', 'Location']),filewriter.writerow([prices[0],location[0]])
df.to_csv('DATA.csv', index=False)
That is because of this:
pd.Series(row, index=columns)
row is a single string ("location,price"), so the same scalar is broadcast into both columns. Try something like:
df = df.append(pd.DataFrame([[prices[i], location[i]]], columns=columns), ignore_index=True)
However, this could be done just once, outside of your for loop:
df = pd.DataFrame(list(zip(prices, location)), columns=columns)
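Putting it together, a minimal sketch of the loop (keeping the question's variable names and assuming arrayofRequest is built as in the question) that fills both lists and creates the DataFrame once at the end:
prices = []
location = []
for url in arrayofRequest:
    soupobj = BeautifulSoup(requests.get(url).content, "lxml")
    spans = soupobj.find_all('span', {'class': 'f343d9ce'})
    divs = soupobj.find_all('div', {'class': '_162e6469'})
    for span, div in zip(spans, divs):           # pair each price with its address
        prices.append(span.get_text(strip=True))
        location.append(div.get_text(strip=True))
df = pd.DataFrame(list(zip(prices, location)), columns=['Price', 'Location'])
df.to_csv('DATA.csv', index=False)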

'int' object has no attribute 'replace' error in python3.x

I don't get why this error occurs. From my point of view the three columns 'WWBO', 'IBO', and 'DBO' have exactly the same structure, but when I apply replace only WWBO works. Does it have something to do with fillna?
I need your help!
import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
#Read url
URL = "https://www.the-numbers.com/box-office-records/worldwide/all-movies/cumulative/released-in-2019"
data = requests.get(URL).text
#parse url
soup = bs(data, "html.parser")
#find the tables you want
table = soup.findAll("table")[1:]
#read it into pandas
df = pd.read_html(str(table))
#concat both the tables
df = pd.concat([df[0],df[1]])
df = df.rename(columns={'Rank': 'Rank',
                        'Movie': 'Title',
                        'Worldwide Box Office': 'WWBO',
                        'Domestic Box Office': 'DBO',
                        'International Box Office': 'IBO',
                        'DomesticShare': 'Share'})
#drop columns
market = df.drop(columns=['Rank','Share'])
market = market.fillna(0)
#replace $ -> ''
market['WWBO'] = market['WWBO'].map(lambda s: s.replace('$',''))
market['IBO'] = market['IBO'].map(lambda s: s.replace('$',''))
market['DBO'] = market['DBO'].map(lambda s: s.replace('$',''))
market
The error is:
AttributeError: 'int' object has no attribute 'replace'
This is not really a pandas bug: fillna(0) fills the missing cells with the integer 0, so those columns end up containing a mix of strings and ints. The solutions are either to eliminate the 0 values or to cast the columns to string, as below:
import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
#Read url
URL = "https://www.the-numbers.com/box-office-records/worldwide/all-movies/cumulative/released-in-2019"
data = requests.get(URL).text
#parse url
soup = bs(data, "html.parser")
#find the tables you want
table = soup.findAll("table")[1:]
#read it into pandas
df = pd.read_html(str(table))
#concat both the tables
df = pd.concat([df[0],df[1]])
df = df.rename(columns={'Rank': 'Rank',
                        'Movie': 'Title',
                        'Worldwide Box Office': 'WWBO',
                        'Domestic Box Office': 'DBO',
                        'International Box Office': 'IBO',
                        'DomesticShare': 'Share'})
#drop columns
market = df.drop(columns=['Rank','Share'])
market = market.fillna(0)
#replace $ -> ''
market['WWBO'] = market['WWBO'].map(lambda s: s.replace('$',''))
market['IBO']=market['IBO'].astype(str)
market['IBO'] = market['IBO'].map(lambda s: s.replace('$',''))
market['DBO']=market['DBO'].astype(str)
market['DBO'] = market['DBO'].map(lambda s: s.replace('$',''))
>>> market[['WWBO','IBO','DBO']]
WWBO IBO DBO
0 2,622,240,021 1,842,814,023 779,425,998
1 1,121,905,659 696,535,598 425,370,061
2 692,163,684 692,163,684 0
3 518,883,574 358,491,094 160,392,480
4 402,976,036 317,265,826 85,710,210
5 358,234,705 220,034,625 138,200,080
6 342,904,508 231,276,537 111,627,971
7 326,150,303 326,150,303 0
8 293,766,097 192,548,368 101,217,729
9 255,832,826 255,832,826 0
10 253,940,650 79,203,380 174,737,270
11 245,303,505 134,268,500 111,035,005
12 190,454,964 84,648,456 105,806,508
13 155,313,390 98,312,634 57,000,756
Clearly one or more of these fields (market['WWBO'], market['IBO'], market['DBO']) contain integer values, and you are trying to perform a string operation, i.e. replace, on them; that is why it throws the error
AttributeError: 'int' object has no attribute 'replace'
You could first print those values and see what they are, or, if there are many, it is better to perform a type check first, like:
if market['WWBO'].dtype == object:
    market['WWBO'] = market['WWBO'].map(lambda s: s.replace('$', ''))
else:
    pass
Let me know if this works for you.
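As a side note, pandas also has a vectorized string replace that avoids the lambda entirely; a minimal sketch, where astype(str) guards against the integer zeros introduced by fillna(0):
for col in ['WWBO', 'IBO', 'DBO']:
    market[col] = market[col].astype(str).str.replace('$', '', regex=False)  # regex=False treats '$' literally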

Use pandas to read the csv file with several uncertain factors

I have asked a related question about strings in: Find the number of \n before a given word in a long string. But that method cannot handle the more complicated case I have run into, so I would like to find a pandas solution here.
I have a csv file (represented here as a string):
csvfile = 'Idnum\tId\nkey:maturity\n2\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
I want to use the pandas:
value = pandas.read_csv(csvfile, sep = '\t', skiprows = 3).set_index('maturity')
to obtain a table like the one shown,
and set the first column, maturity, as the index.
But there are several uncertain factors in the csvfile:
1. .set_index('maturity'): the key maturity of the index is contained in the row key: maturity, so I first have to find the row key: xxxx and extract the string xxxx.
2. skiprows = 3: the number of rows to skip before the header row is uncertain. The csvfile can be something like:
'Idnum\tId\nkey:maturity\n2\n\n\n\n\n\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
I have to find the row number of the header row (namely the row beginning with the xxxx found in the row key: xxxx).
3. sep = '\t': the csvfile may use spaces as the separator, like:
csvfile = 'Idnum Id\nkey: maturity\n2\nmaturity para1 para2\n1Y 0 0\n2Y 0 0'
So is there any general pandas approach to deal with a csvfile that has the above uncertain factors?
Actually the string:
csvfile = 'Idnum\tId\nkey:maturity\n2\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
comes from a StringIO object, data:
data.getvalue() = 'Idnum\tId\nkey:maturity\n2\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
I am not familiar with this structure; even when I just want to read the original data without any editing, using:
value = pandas.read_csv(data, sep = '\t')
raises an error.
You can read the file line by line, collecting the necessary information and then pass the remainder to pd.read_csv with the appropriate arguments:
from io import StringIO
import re
import pandas as pd
with open('data.csv') as fh:
    # first line starting with "key:" gives the index name
    key = next(filter(lambda x: x.startswith('key:'), fh)).lstrip('key:').strip()
    # the header row is the first subsequent line that starts with that key
    header = re.split('[ \t]+', next(filter(lambda x: x.startswith(key), fh)).strip())
    # the remaining lines are the data
    df = pd.read_csv(StringIO(fh.read()), header=None, names=header, index_col=0, sep=r'\s+')
Example for data via StringIO:
fh = StringIO('Idnum\tId\nkey:maturity\n2\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0')
key = next(filter(lambda x: x.startswith('key:'), fh)).lstrip('key:').strip()
header = re.split('[ \t]+', next(filter(lambda x: x.startswith(key), fh)).strip())
df = pd.read_csv(fh, header=None, names=header, index_col=0, sep=r'\s+')
If you do not mind reading the csv file twice you can try doing something like:
from io import StringIO
csvfile = 'Idnum\tId\nkey:maturity\n2\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
data = pd.read_csv(StringIO(csvfile), sep='\t', error_bad_lines=False, header=None)
skiprows = len(data)
pd.read_csv(StringIO(csvfile), sep='\t', skiprows=skiprows)
The same works for your other example:
csvfile = 'Idnum\tId\nkey:maturity\n2\n\n\n\n\n\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
data = pd.read_csv(StringIO(csvfile), sep='\t', error_bad_lines=False, header=None)
skiprows = len(data)
pd.read_csv(StringIO(csvfile), sep='\t', skiprows=skiprows)
This assumes that you know the file's separator.
Also if you want to find the key:
csvfile = 'Idnum\tId\nkey:maturity\n2\n\n\n\n\n\nmaturity\tpara1\tpara2\n1Y\t0\t0\n2Y\t0\t0'
data = pd.read_csv(StringIO(csvfile), sep='\t', error_bad_lines=False, header=None)
key = [x.replace('key:','') for x in data[0] if x.find('key')>-1]
skiprows = len(data)
pd.read_csv(StringIO(csvfile), sep='\t', skiprows=skiprows).set_index(key)
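If the separator is also unknown (uncertain factor 3), the standard library's csv.Sniffer can guess it from a data row before the read_csv calls above; a sketch reusing csvfile and skiprows from the previous snippet:
import csv
sample = csvfile.splitlines()[-1]                        # use the last data row as the sample
dialect = csv.Sniffer().sniff(sample, delimiters=' \t')  # guess between space and tab
pd.read_csv(StringIO(csvfile), sep=dialect.delimiter, skiprows=skiprows)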

Set Multiple Restrictions for Rows Called to Print in Pandas

import pandas as pd
import numpy as np
#load data
#data file and py file must be in same file path
df = pd.read_csv('cbp15st.txt', delimiter=',', encoding='utf-8-sig')
#define load data DataFrame columns
state = df['FIPSTATE']
industry = df['NAICS']
legal_form_of_organization = df['LFO']
suppression_flag = df['EMPFLAG']
total_establishment = df['EST']
establishment_1_4 = df['N1_4']
establishment_5_9 = df['N5_9']
establishment_10_19 = df['N10_19']
establishment_20_49 = df['N20_49']
establishment_50_99 = df['N50_99']
establishment_100_249 = df['N100_249']
establishment_250_499 = df['N250_499']
establishment_500_999 = df['N500_999']
establishment_1000_more = df['N1000']
#use df.loc to parse dataset for partiuclar value types
print(df.loc[df['EMPFLAG']=='A'], df.loc[df['FIPSTATE']==1],
      df.loc[df['NAICS']=='------'])
Currently I am using df.loc to locate specific values in the DataFrame columns, but this prints the rows matching each condition separately, i.e. every row that meets any one of the conditions rather than all of them (an "or" versus an "and" statement).
I am trying to find a way to place multiple restrictions on this so that it only returns rows that meet criteria x, y, and z.
Current Readout from above:
You can use the & operator to specify multiple filtering criteria, something like:
df1 = df.loc[(df['EMPFLAG']=='A') & (df['FIPSTATE']==1) & (df['NAICS']=='------')]
print(df1)
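An equivalent, often more readable form uses DataFrame.query:
df1 = df.query("EMPFLAG == 'A' and FIPSTATE == 1 and NAICS == '------'")
print(df1)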