from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
import pandas as pd
import numpy as np
import time
from datetime import datetime
import re
import requests
import os
from PIL import Image
from webdriver_manager.chrome import ChromeDriverManager
import cloudscraper  # required by download_page_bg_fgv2() below
# %%
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# driver = webdriver.Chrome(ChromeDriverManager().install())
# %%
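# Configuration: output folder for the downloaded page images; Chrome_path is defined but
# not used below (webdriver.Chrome() is created without an explicit driver path).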
# USER = os.getlogin()
output_path = '../1 Inputs/1 Bases originales/repositorioImagenes'
Chrome_path='D:/OneDrive/apoyoconsultoria.com/File Server - Analytics/3 Proyectos/0 otros/2021-xxx-L Social listening comercial/3 Pipeline/3 Programas'
driver = webdriver.Chrome()
#driver = webdriver.Chrome(os.path.abspath("chromedriver.exe"))
driver.get('https://web.peruquiosco.pe/')
print("connection enabled")
# %%
# _email = '[email protected]'
# _password = 'AC082021'
_email = '[email protected]'
_password = '12345678'
time.sleep(10)
driver.maximize_window()
# %%
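# Wait for the promotional offer iframe, switch into it, and close the popup.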
iframe_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element(By.XPATH,'//*[@id="offer_f5b2ec0996120f065b64-0"]'))
#iframe_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element_by_xpath('//*[@id="offer_f5b2ec0996120f065b64-0"]'))
driver.switch_to.frame(iframe_popup)
btn_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element(By.XPATH, '//*[@id="template-container"]/div/div[2]/div/div[1]/button'))
btn_popup.click()
# %%
# Click the follow-up popup/confirmation button.
time.sleep(3)
btn_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element(By.XPATH, '//*[@id="root"]/div/div[1]/div/div[2]/div[2]/button[2]'))
btn_popup.click()
# %%
# Locate the login iframe and switch into it.
iframe_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element(By.XPATH, '/html/body/div[4]/div/iframe'))
driver.switch_to.frame(iframe_popup)
# %%
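# Locate the email and password inputs of the login form inside the iframe.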
txt_email = WebDriverWait(driver, 20).until(
    lambda x: x.find_element(By.XPATH, '//*[@id="autofill-form"]/screen-login/p[3]/input'))
txt_pass = driver.find_element(By.XPATH, '//*[@id="autofill-form"]/screen-login/p[4]/input')
# %%
time.sleep(10)
txt_email.clear()
txt_email.send_keys(_email)
time.sleep(2)
txt_pass.clear()
txt_pass.send_keys(_password)
time.sleep(2)
btn_login_2 = driver.find_element(By.XPATH, '//*[@id="autofill-form"]/screen-login/p[6]/button')
btn_login_2.click()
driver.switch_to.default_content()
time.sleep(10)
# %%
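# Scroll the target publication tile into view so it can be clicked below.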
from selenium.webdriver.common.action_chains import ActionChains
element = driver.find_element(By.XPATH, "/html/body/div[1]/div/div[4]/div/div/div[2]/div/div[2]/div/div[6]")
# actions = ActionChains(driver)
# actions.move_to_element(element).perform()
time.sleep(3)
# %%
driver.execute_script("arguments[0].scrollIntoView();", element)
time.sleep(2)
driver.execute_script("arguments[0].scrollIntoView();", element)
time.sleep(3)
# %%
btn_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element(By.XPATH, '/html/body/div[1]/div/div[4]/div/div/div[2]/div/div[2]/div/div[6]'))
btn_popup.click()
fecha_busqueda = datetime.now()
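# Build the list of publications to download. weekday(): Monday=0 ... Sunday=6.
# Gestion is skipped on weekends, Somos is added on Saturdays, and Revista G on the
# third Friday of the month (days 15-21, i.e. (day - 1) // 7 + 1 == 3).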
url_newspaper_list = ['https://visor.peruquiosco.pe/diario-el-comercio',
                      'https://visor.peruquiosco.pe/diario-peru21/']
prefix_newspaper_list = ['elcomercio',
                         'peru21']
if fecha_busqueda.date().weekday() not in [5, 6]:
    url_newspaper_list += ['https://visor.peruquiosco.pe/diario-gestion/']
    prefix_newspaper_list += ['gestion']
# if fecha_busqueda.date().weekday() == 0:
#     url_newspaper_list += ['http://elcomercio.peruquiosco.pe/m/dia-1/' + fecha_busqueda.strftime('%Y%m%d')]
#     prefix_newspaper_list += ['dia1']
if fecha_busqueda.date().weekday() == 5:
    url_newspaper_list += ['https://visor.peruquiosco.pe/somos/']
    prefix_newspaper_list += ['rsomos']
if fecha_busqueda.date().weekday() == 4 and ((fecha_busqueda.day - 1) // 7 + 1) == 3:
    url_newspaper_list += ['https://visor.peruquiosco.pe/revista-g/']
    prefix_newspaper_list += ['revistag']
# %%
def check_exists_by_xpath(xpath):
    try:
        driver.find_element(By.XPATH, xpath)
    except NoSuchElementException:
        return False
    return True
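# download_newspaper: open the viewer for one publication, walk the thumbnail panel to
# collect the per-page tile URLs, then download each page's bg/fg layers and save them
# as PNGs into new_directory.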
def download_newspaper(url_newspaper, search_date, newspaper_prefix):
    # url_newspaper + '/' + search_date.strftime('%Y%m%d')  # leftover, not used
    driver.get(url_newspaper)
    # Open the pages panel from the viewer toolbar.
    button_pages = WebDriverWait(driver, 120).until(
        lambda x: x.find_element(By.XPATH, '//li[@id="toolbarBottom_0"]'))
    button_pages.click()
    # Collect tile image URLs (note: '//img' searches the whole document, not only urls_div).
    urls_div = driver.find_element(By.XPATH, "//div[@class='pp-slider']")
    img_tiles_list = urls_div.find_elements(By.XPATH, "//img")
    url_tiles_list = list()
    for img_tiles in img_tiles_list:
        url_tiles_list.append(img_tiles.get_attribute('src'))
    url_tiles_list.sort()
    actual_url_tiles_list = [x for x in url_tiles_list if 'page=' in x]
    num_pages_list = [re.search('page=(.*)&scale', x).group(1) for x in actual_url_tiles_list]
    doc_name = re.search('file=(.*)&page', actual_url_tiles_list[0]).group(1)
    total_num_pages = max(list(set([int(x) for x in num_pages_list])))
    # Open the first thumbnail, then walk the thumbnail panel collecting every page's tile URL.
    btn_popup = driver.find_element(By.XPATH, '//*[@id="thumb_1"]/a/span/span/img')
    btn_popup.click()
    cont = 0
    listarapida = []
    for e in range(0, 14):
        for i in range(2, 28):
            cont += 1
            camino = '//*[@id="Panel_pagesViewContainer"]/div[1]/div[' + str(i) + ']/div[1]/img[1]'
            if check_exists_by_xpath(camino):
                listarapida.append(driver.find_element(By.XPATH, camino).get_attribute('src'))
        # Advance the thumbnail panel and give it time to load.
        btn_popup = WebDriverWait(driver, 60).until(lambda x: x.find_element(By.XPATH, '//*[@id="Panel_pagesViewContainer"]/div[3]'))
        btn_popup.click()
        time.sleep(4)
    # De-duplicate while preserving order and keep only real page-tile URLs.
    mylist = list(dict.fromkeys(listarapida))
    ListaLink = []
    for i in mylist:
        if i.find('https://i.prcdn.co/img?file') != -1:
            ListaLink.append(i)
    print(ListaLink)
    # Download each page (composited image plus raw foreground layer) and save as PNG.
    for link in ListaLink:
        try:
            pil_image, pil_image2 = download_page_bg_fgv2(link)
            num_page = getNumber(link)
            num_page_label = str(num_page) if num_page >= 10 else '0' + str(num_page)
            save_path = new_directory + '/im_{}_{}__{}.png'.format(newspaper_prefix, search_date.strftime("%Y%m%d"), num_page_label)
            pil_image.save(save_path, 'PNG')
            save_path = new_directory + '/im_{}_{}__{}_{}.png'.format(newspaper_prefix, search_date.strftime("%Y%m%d"), num_page_label, 'fg')
            pil_image2.save(save_path, 'PNG')
        except Exception:
            continue
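# download_page: fetch a single page image at scale=125 with plain requests
# (defined but not called in the main flow).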
def download_page(url):
    # Force scale=125 in the tile URL (assumes the existing scale value is two digits long).
    escala = url.find('scale')
    url = url[:escala + 6] + str(125) + url[escala + 8:]
    url_page = url
    print(url_page)
    _img_pg = requests.get(url_page, stream=True)
    _img_pg.raw.decode_content = True  # handle spurious Content-Encoding
    pil_img = Image.open(_img_pg.raw)
    return pil_img
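# download_page_bg_fg: fetch the background and foreground layers separately and
# alpha-composite them (also defined but not called in the main flow).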
def download_page_bg_fg(file_name, num_page, scale):
    url_template_page = 'https://i.prcdn.co/img?file={}&page={}&scale={}&layer={}'
    layers = ['bg', 'fg']
    url_page_bg = url_template_page.format(file_name, num_page, scale, layers[0])
    url_page_fg = url_template_page.format(file_name, num_page, scale, layers[1])
    print(url_page_bg)
    print(url_page_fg)
    _img_bg = requests.get(url_page_bg, stream=True)
    _img_bg.raw.decode_content = True  # handle spurious Content-Encoding
    pil_img_bg = Image.open(_img_bg.raw)
    _img_fg = requests.get(url_page_fg, stream=True)
    _img_fg.raw.decode_content = True  # handle spurious Content-Encoding
    pil_img_fg = Image.open(_img_fg.raw)
    pil_img_bg = pil_img_bg.convert('RGBA')
    pil_img_fg = pil_img_fg.convert('RGBA')
    pil_img = Image.alpha_composite(pil_img_bg, pil_img_fg)
    return pil_img
def getNumber(url):
    # Extract the page number between '&page=' and '&scale' in the tile URL.
    n2 = url.find('&scale')
    print(n2)
    n1 = url.find('&page')
    print(n1)
    num = url[n1 + 6:n2]
    return int(num)
def download_page_bg_fgv2(url):
    layers = ['bg', 'fg']
    # Force scale=179 in the background-layer URL (assumes the existing scale value is two digits long).
    escala = url.find('scale')
    url = url[:escala + 6] + str(179) + url[escala + 8:]
    url_page_bg = url
    url_page_fg = url.replace('bg', 'fg')
    # cloudscraper is used instead of requests to get through the image CDN's anti-bot check.
    scraper = cloudscraper.create_scraper(browser={'browser': 'firefox', 'platform': 'windows', 'mobile': False})
    print(url_page_bg)
    print(url_page_fg)
    _img_bg = scraper.get(url_page_bg, stream=True)
    _img_bg.raw.decode_content = True  # handle spurious Content-Encoding
    pil_img_bg = Image.open(_img_bg.raw)
    _img_fg = scraper.get(url_page_fg, stream=True)
    _img_fg.raw.decode_content = True  # handle spurious Content-Encoding
    pil_img_fg = Image.open(_img_fg.raw)
    pil_img_fg2 = pil_img_fg  # keep the raw foreground layer to save separately
    pil_img_bg = pil_img_bg.convert('RGBA')
    pil_img_fg = pil_img_fg.convert('RGBA')
    pil_img = Image.alpha_composite(pil_img_bg, pil_img_fg)
    return pil_img, pil_img_fg2
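# Create today's output folder and download every publication in the list, backing out
# of the viewer between publications.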
new_directory = output_path + '/img_' + fecha_busqueda.strftime('%Y%m%d')
if not os.path.exists(new_directory):
    os.makedirs(new_directory)
try:
    for url_newspaper, prefix_newspaper in zip(url_newspaper_list, prefix_newspaper_list):
        download_newspaper(url_newspaper, fecha_busqueda, prefix_newspaper)
        time.sleep(4)
        # 'Salir': click out of the viewer before opening the next publication.
        Salir = driver.find_element(By.XPATH, '/html/body/div[8]/div[3]/div/div/div[3]/a/span/span[2]')
        Salir.click()
        time.sleep(4)
        Salir = driver.find_element(By.XPATH, '/html/body/div[10]/div/section/div/div/ul/li[2]')
        Salir.click()
        time.sleep(4)
    driver.quit()
except Exception:
    driver.quit()
    print("An exception occurred")