Simple Web Crawling

A simple web crawler in Python: it scrapes Python job postings from Indeed and Stack Overflow Jobs with requests and BeautifulSoup, then saves the combined results to a CSV file.

1. Source Code

main.py

from indeed import get_jobs as get_indeed_jobs
from so import get_jobs as get_so_jobs
from save import save_to_file

# Scrape both sites, combine the results, and write them to jobs.csv.
indeed_jobs = get_indeed_jobs()
so_jobs = get_so_jobs()
jobs = indeed_jobs + so_jobs
save_to_file(jobs)
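
Both get_indeed_jobs() and get_so_jobs() return plain lists of dicts, so they can be concatenated with + before saving. A minimal sketch with made-up values (not real scrape results), only to show the shape save_to_file expects:

from save import save_to_file

# Made-up example record; real runs get these from indeed.py and so.py.
jobs = [
    {
        "title": "Python Developer",
        "company": "Example Corp",
        "location": "Seoul",
        "link": "https://www.indeed.com/viewjob?jk=0000000000000000",
    },
]
save_to_file(jobs)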

indeed.py

import requests
from bs4 import BeautifulSoup

LIMIT = 50
URL = f"https://kr.indeed.com/jobs?q=python&limit={LIMIT}"


def extract_indeed_pages():
    # Find the highest page number shown in the pagination block of the first result page.
    result = requests.get(URL)
    soup = BeautifulSoup(result.text, "html.parser")
    pagination = soup.find("div", {"class": "pagination"})

    links = pagination.find_all("a")
    pages = []
    for link in links[:-1]:  # the last link is the "Next" button, not a page number
        pages.append(int(link.string))

    max_page = pages[-1]
    return max_page


def extract_jobs(html):
    # Pull title, company, location and the job id out of one job card.
    title = html.find("h2", {"class": "title"}).find("a", recursive=False)["title"]
    company = html.find("span", {"class": "company"})
    if company:
        company_anchor = company.find("a")
        if company_anchor is not None:
            company = str(company_anchor.string)
        else:
            company = str(company.string)
        company = company.strip()
    else:
        company = None
    location = html.find("div", {"class": "recJobLoc"})["data-rc-loc"]
    job_id = html["data-jk"]
    return {
        "title": title,
        "company": company,
        "location": location,
        "link": f"https://www.indeed.com/viewjob?jk={job_id}"
    }


def extract_indeed_jobs(last_page):
    jobs = []
    for page in range(last_page):
        print(f"Scrapping indeed: page={page}")
        result = requests.get(f"{URL}&start={page*LIMIT}")
        soup = BeautifulSoup(result.text, "html.parser")
        results = soup.find_all("div", {"class": "jobsearch-SerpJobCard"})
        for result in results:
            job = extract_jobs(result)
            jobs.append(job)
    return jobs


def get_jobs():
    last_indeed_page = extract_indeed_pages()
    jobs = extract_indeed_jobs(last_indeed_page)
    return jobs
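
extract_indeed_jobs pages through results by offset: the start query parameter advances by LIMIT per page. A standalone sketch (not part of the project) that only prints the URLs a hypothetical three-page run would request:

LIMIT = 50
URL = f"https://kr.indeed.com/jobs?q=python&limit={LIMIT}"

# For last_page = 3, the scraper would request offsets 0, 50 and 100.
for page in range(3):
    print(f"{URL}&start={page * LIMIT}")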

save.py

import csv

def save_to_file(jobs):
    # newline="" keeps the csv module from writing blank lines between rows,
    # and the with block makes sure the file is closed after writing.
    with open("jobs.csv", mode="w", newline="", encoding="utf-8") as file:
        writer = csv.writer(file)
        writer.writerow(["title", "company", "location", "link"])
        for job in jobs:
            writer.writerow(list(job.values()))
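
Because save_to_file writes a header row followed by one row per job dict, the file can be read back with csv.DictReader. A quick standalone check, assuming jobs.csv was produced by an earlier run:

import csv

with open("jobs.csv", newline="", encoding="utf-8") as file:
    for row in csv.DictReader(file):
        # Each row is keyed by the header: title, company, location, link.
        print(row["title"], "|", row["company"], "|", row["location"])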

so.py

import requests
from bs4 import BeautifulSoup

LIMIT = 50
URL = f"https://stackoverflow.com/jobs?q=python&sort=i"


def get_last_page():
    result = requests.get(URL)
    soup = BeautifulSoup(result.text, "html.parser")
    pages = soup.find("div", {"class": "s-pagination"}).find_all("a")
    # The last pagination link is "next", so the second-to-last one holds the last page number.
    last_page = pages[-2].get_text(strip=True)
    return int(last_page)


def extract_job(html):
    title = html.find("h2", {"class": "mb4"}).find("a")["title"]
    company, location = html.find("h3", {"class": "mb4"}).find_all("span", recursive=False)
    company = company.get_text(strip=True).strip("•").strip("\n")
    location = location.get_text(strip=True).strip("\n")
    job_id = html['data-jobid']
    return {
        'title': title,
        'company': company,
        'location': location,
        'link' : f"https://stackoverflow.com/jobs/{job_id}"
    }


def extract_jobs(last_page):
    jobs = []
    for page in range(last_page):
        print(f"Scrapping SO: page: {page}")
        result = requests.get(f"{URL}&pg={page+1}")
        soup = BeautifulSoup(result.text, "html.parser")
        results = soup.find_all("div", {"class": "-job"})
        for result in results:
            job = extract_job(result)
            jobs.append(job)
    return jobs


def get_jobs():
    last_page = get_last_page()
    jobs = extract_jobs(last_page)
    return jobs
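
extract_job leans on three BeautifulSoup calls: find, find_all with recursive=False (direct children only), and get_text(strip=True). A self-contained sketch on invented markup that mirrors the class names used above, only to illustrate those calls:

from bs4 import BeautifulSoup

# Invented markup shaped like a job card; the real page's markup differs.
html = """
<div class="-job" data-jobid="123">
  <h2 class="mb4"><a title="Python Developer">Python Developer</a></h2>
  <h3 class="mb4"><span>Example Corp</span><span>Seoul</span></h3>
</div>
"""
card = BeautifulSoup(html, "html.parser").find("div", {"class": "-job"})
title = card.find("h2", {"class": "mb4"}).find("a")["title"]
company, location = card.find("h3", {"class": "mb4"}).find_all("span", recursive=False)
print(title, company.get_text(strip=True), location.get_text(strip=True), card["data-jobid"])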

3. Results


4. Retrospective

I worked on this while serving in the military, so it took a while, but I enjoyed the crawling.
