How to navigate multiple pages when web scraping with BeautifulSoup if the page number is encrypted?

I scrape a site that contains 1000 pages. I used to traverse the pages with page_no set to 1, 2, 3 ... 1000 and download the data to Excel. Now the site has encrypted the page number, so the code no longer works and I need help. The URL now comes in like this: https://bidplus.gem.gov.in/bidlists?bidlists&page_no=Hgw0LYpSZdLXow1Wq84uKar1nxXbFhClXQDuAAiPDxU. The code is given below.

(Screenshots of the pagination HTML omitted.)
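Since the page_no token is now opaque, the URLs can no longer be generated from a counter; one workaround is to follow the links that the pagination block itself exposes (the markup shown in the screenshots). Below is only a minimal sketch: the rel="next" and '»' selectors are assumptions about the pager's markup and must be checked against the real HTML.

import requests
from urllib.parse import urljoin

from bs4 import BeautifulSoup as bs
from urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

BASE = 'https://bidplus.gem.gov.in'

def follow_pages(start_url, max_pages=1000):
    """Yield each listing page's HTML by walking the pagination links,
    since the encrypted page_no token cannot be computed locally."""
    url = start_url
    with requests.Session() as session:
        for _ in range(max_pages):
            resp = session.get(url, verify=False)
            yield resp.text
            soup = bs(resp.text, 'lxml')
            # Assumption: the pager marks the next page with rel="next"
            # or a '»' label; adjust to whatever the site actually renders.
            nxt = soup.find('a', rel='next') or soup.find('a', string='»')
            if nxt is None or not nxt.get('href'):
                break  # no further page link, so this was the last page
            url = urljoin(BASE, nxt['href'])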
Code:

import concurrent.futures
import csv
import functools

import requests
from bs4 import BeautifulSoup as bs
from urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


def download_page(session, page_no):
    # Build the listing URL from a plain page number and fetch it.
    url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + str(page_no)
    print('URL created: ' + url)
    resp = session.get(url, verify=False)
    return resp.text


def scrap_bid_data():
    NUMBER_THREADS = 5  # number of concurrent download requests
    with open('GEMconcurrent_1016.csv', 'w', newline='') as out_file:
        f = csv.writer(out_file)
        f.writerow(['Bidnumber', 'Items', 'Quantity', 'Department', 'Enddate', 'pageNumber'])
        with requests.Session() as session:
            page_downloader = functools.partial(download_page, session)
            with concurrent.futures.ThreadPoolExecutor(max_workers=NUMBER_THREADS) as executor:
                pages = executor.map(page_downloader, range(1, 10))
                page_no = 0
                for page in pages:
                    page_no += 1
                    soup_data = bs(page, 'lxml')
                    extracted_data = soup_data.find('div', {'id': 'pagi_content'})
                    if extracted_data is None or len(extracted_data) == 0:
                        print('No data at page number', page_no)
                        print(page)
                    else:
                        # Every second child of pagi_content is a bid block;
                        # split its text into lines and pick fields by position.
                        for idx in range(len(extracted_data)):
                            if idx % 2 == 1:
                                bid_data = extracted_data.contents[idx].text.strip().split('\n')
                                if len(bid_data) > 1:
                                    print(page_no)
                                    if len(bid_data[8]) > 1 and len(bid_data[10].split(':')) > 1:
                                        bidno = bid_data[0].split(":")[-1]
                                        items = bid_data[9].strip().split('Items:')[-1]
                                        qnty = int(bid_data[10].split(':')[-1])
                                        dept = (bid_data[11] + bid_data[16].strip()).split(":")[-1]
                                        edate = bid_data[21].split("End Date:")[1]
                                        f.writerow([bidno, items, qnty, dept, edate, page_no])


scrap_bid_data()
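If each page's token can only be read off the previous page, the ThreadPoolExecutor over range(1, 10) can no longer drive the downloads, because the fetch order becomes inherently sequential. A hypothetical rewiring of the main loop around the follow_pages sketch above (the per-page parsing can stay exactly as written):

def scrap_bid_data():
    start = 'https://bidplus.gem.gov.in/bidlists?bidlists'
    with open('GEMconcurrent_1016.csv', 'w', newline='') as out_file:
        f = csv.writer(out_file)
        f.writerow(['Bidnumber', 'Items', 'Quantity', 'Department', 'Enddate', 'pageNumber'])
        for page_no, page in enumerate(follow_pages(start), start=1):
            soup_data = bs(page, 'lxml')
            extracted_data = soup_data.find('div', {'id': 'pagi_content'})
            # ...the per-page parsing from the original loop body goes here unchanged...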

