import os
import csv
import requests
from bs4 import BeautifulSoup
# Scrape the "MainSuperBrand" companies from alba.co.kr's front page and,
# for each brand, write its job listings (place/title/time/pay/date)
# into a CSV file named after the company.
os.system("clear")  # clear the terminal; NOTE(review): Unix-only ("cls" on Windows)

alba_url = "http://www.alba.co.kr"
home_res = requests.get(alba_url)
home_res.raise_for_status()  # fail fast instead of parsing an error page

home_soup = BeautifulSoup(home_res.text, "html.parser")
brand_box = home_soup.find('div', {'id': 'MainSuperBrand'})
# Defensive: if the brand box is missing (site layout change), scrape nothing.
links = brand_box.find_all('a', {'class': 'goodsBox-info'}) if brand_box else []

for a in links:
    # Defensive: skip anchors without a link or a visible company name.
    href = a.attrs.get('href')
    company = a.find('span', {'class': 'company'})
    if not href or not company:
        continue
    # '/' is a path separator, so it cannot appear in a file name.
    file_name = company.get_text().replace('/', ',')

    # newline='' is required by the csv module to avoid blank rows on Windows;
    # 'with' guarantees the file is closed even if a request below raises.
    with open(file_name, mode="w", newline="", encoding="utf-8") as file:
        writer = csv.writer(file)
        writer.writerow(["place", "title", "time", "pay", "date"])

        each_page = requests.get(href)
        each_page.raise_for_status()
        tbody = BeautifulSoup(each_page.text, "html.parser").find('tbody')
        # Defensive: a page without a listings table yields an empty CSV.
        contents = tbody.find_all('tr', {'class': ''}) if tbody else []
        for content in contents:
            # Defensive: rows without a 'local' cell (ads/separators) are skipped.
            bplace = content.find('td', {'class': 'local'})
            if bplace:
                place = bplace.get_text()
                title = content.find('span', {'class': 'company'}).get_text()
                TIS = content.find('td', {'class': 'data'}).find('span').get_text()
                paystd = content.find('td', {'class': 'pay'}).find('span', {'class': 'payIcon'}).get_text()
                payunit = content.find('td', {'class': 'pay'}).find('span', {'class': 'number'}).get_text()
                pay = paystd + payunit
                post = content.find('td', {'class': 'regDate'}).get_text()
                writer.writerow([place, title, TIS, pay, post])