Scraping Data from a Certain App



It has been a while since my last post; I have been drifting through the days a bit. Recently I wrote a crawler to pull user and vendor data from a certain app. There are 120K+ records, and with the sleep intervals the run takes a long time, so I split the job into two scripts: the first collects the request parameters from the listing (first-level) pages, and the second fetches the detail (second-level) pages. The two scripts could also be merged, with the captured request parameters kept in a list that the second stage loops over.

Pulling this much data at this frequency will inevitably trigger anti-scraping measures, so a proxy IP pool is needed. Honestly, scraping free proxy lists is a waste of time: the number of usable IPs is tiny, and that approach is only good for toy projects. If you are collecting data for a company, buy a proxy package instead. The part marked in red in the original screenshot is the vendor's proxy API; call it to fetch proxy IPs, put them into the pool, and use the response status codes to weed out dead proxies and keep the ones that work.

(One thing I forgot to mention: to capture the app's traffic you need Fiddler to find the request header parameters. Install and register the app on your phone, connect the phone to the same Wi-Fi segment as your computer, and point the Wi-Fi proxy at your computer's IP; when you open the app, its requests will show up in Fiddler.)

First script: collect the company ids from the listing pages.

import requests
import urllib3
import pprint
import socket
import pymysql
import pandas as pd
import os
import ssl
import time

datapage = []
aplist = []
T_RETRIES = 5
timeout = 20                      # the original timeout value was lost in formatting; 20 s is a placeholder
socket.setdefaulttimeout(timeout)
urllib3.disable_warnings()
ssl._create_default_https_context = ssl._create_unverified_context
proxy_pool_url = []


def main():
    urllib3.disable_warnings()
    os.chdir(r'E:\eclipse-workspace\day23\weixiuzhan\venv')
    url1 = 'http://47.106.123.30:8070/app/api/usercompany/finsCompanyListAndVipCompany'
    df = pd.read_json('')         # input file name was left blank in the original post
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Accept': '*/*',
        'Accept-Language': 'zh-Hans-CN;q=1, en-CN;q=0.9',
        'Content-Length': '86',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': 'lanaer/15.1.8 (iPhone; iOS 14.4.2; Scale/2.00)'
    }
    sk = 0
    for i in range(len(df)):
        for Num in range(1, 20):
            try:
                # print(df['dataes'][i]['latitude'], df['dataes'][i]['longitude'])
                params = "locationX=" + str(df['dataes'][i]['latitude']) + \
                         "&locationY=" + str(df['dataes'][i]['longitude']) + \
                         "&oneselfType=0&pageId=" + str(Num) + \
                         "&pageCount=25&pageSize=20&userType=3"
            except KeyError:
                continue
            # proxy vendor API; the host and user_token were truncated in the original
            gurl = '/api/v2/proxies?order_id=ZILH39697&num=1&format=text&line_separator=win&can_repeat=no&user_token'
            rep = requests.get(url=gurl)
            proxy_pool_url.append(rep.text)   # one "ip:port" per call (format=text)
            time.sleep(1)
            proxies = {'https': 'http://' + proxy_pool_url[sk]}   # scheme assumed; the prefix was lost in formatting
            response = requests.post(url=url1, allow_redirects=False, proxies=proxies,
                                     data=params, headers=headers, verify=False)
            json_data = response.json()
            result = json_data
            code_status = result['msg']
            if code_status == '您的操作过于频繁,请休息一下吧~':   # "too many requests" -> rotate to the next proxy
                sk = sk + 1
                proxies1 = {'https': 'http://' + proxy_pool_url[sk]}
                response1 = requests.post(url=url1, allow_redirects=False, proxies=proxies1,
                                          data=params, headers=headers, verify=False)
                json_data1 = response1.json()
                result2 = json_data1
            else:
                result2 = json_data
            try:
                result2['data']['result'][0]
            except (IndexError, KeyError, TypeError):
                continue
            for j in range(len(result2['data']['result'])):   # renamed from i to avoid shadowing the outer loop
                try:
                    id = result2['data']['result'][j]['_id']
                    aplist.append(id)
                except (IndexError, KeyError):
                    id = ' '
                    aplist.append(id)
            time.sleep(2)
    print(aplist)
    output = open('', 'w', encoding='gbk')   # output file name was left blank in the original post
    output.write(str(aplist))
    output.close()


if __name__ == '__main__':
    main()
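The first script ends by dumping aplist straight to a text file, while the second one reads its input back with pd.read_json and indexes df['dataes'][i]['id']. As a bridging sketch (the file name ids.json and the helper names are assumptions, not from the original post, where both file names were left blank), the id list could be written as JSON in exactly the shape the second script expects:

# Hypothetical handoff between the two scripts; 'ids.json' is an assumed name,
# since the original post left both file names blank.
import json

import pandas as pd


def save_ids(aplist, path='ids.json'):
    # Shape the data as {"dataes": [{"id": ...}, ...]}, matching df['dataes'][i]['id'] in the second script
    records = {'dataes': [{'id': value} for value in aplist]}
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(records, f, ensure_ascii=False)


def load_ids(path='ids.json'):
    # pd.read_json returns a frame whose 'dataes' column holds the per-company dicts
    return pd.read_json(path)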

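Both scripts only rotate to the next proxy after the app replies that the requests are too frequent; the status-code screening mentioned above could instead be pulled into a small helper so that dead proxies never enter the pool. A minimal sketch, assuming the vendor API returns one "ip:port" per call as plain text (the probe URL and function names are illustrative, not from the original):

import requests


def fetch_proxy(gurl):
    # Ask the vendor API for a single proxy address (format=text -> plain "ip:port")
    return requests.get(gurl, timeout=10).text.strip()


def proxy_is_alive(proxy, probe_url='http://47.106.123.30:8070/', timeout=5):
    # Keep a proxy only if a cheap request routed through it comes back with a non-error status code
    proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
    try:
        r = requests.get(probe_url, proxies=proxies, timeout=timeout, verify=False)
        return r.status_code < 400
    except requests.RequestException:
        return False

A proxy that fails the check would simply be discarded and another one fetched, instead of bumping sk only after the app has already complained.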
Second script: fetch each company's detail page and save the fields to MySQL.

import requests
import urllib3
import pprint
import socket
import pymysql
import pandas as pd
import os
import ssl
import time

datapage = []
aplist = []
T_RETRIES = 5
timeout = 20                      # the original timeout value was lost in formatting; 20 s is a placeholder
socket.setdefaulttimeout(timeout)
urllib3.disable_warnings()
ssl._create_default_https_context = ssl._create_unverified_context
proxy_pool_url = []


def main():
    urllib3.disable_warnings()
    os.chdir(r'E:\eclipse-workspace\day23\weixiuzhan\venv')
    df = pd.read_json('')         # file of ids written by the first script; name left blank in the original
    head = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Accept': '*/*',
        'User-Agent': 'lanaer/15.1.20 (iPhone; iOS 13.4.2; Scale/2.00)',
        'Accept-Language': 'zh-Hans-CN;q=1, en-CN;q=0.9',
        'Content-Length': '319',
        'Accept-Encoding': 'gzip, deflate'
    }
    url2 = 'http://47.106.123.30:8070/app/api/usercompany/v1/getById'
    sk = 0
    for i in range(len(df)):
        # the key value was truncated in the original post
        param = "companyType=3&id=" + str(df['dataes'][i]['id']) + "&key=QiOiJBUFAiLCJpc3MiOiJTZXJ2aWNlIiwiZXh"
        # proxy vendor API; the host and user_token were truncated in the original
        gurl = '/api/v2/proxies?order_id=ZILH39697&num=1&format=text&line_separator=win&can_repeat=no&user_token=8'
        rep = requests.get(url=gurl)
        proxy_pool_url.append(rep.text)
        time.sleep(1)
        proxies = {'https': 'http://' + proxy_pool_url[sk]}   # scheme assumed; the prefix was lost in formatting
        response = requests.post(url=url2, allow_redirects=False, proxies=proxies,
                                 data=param, headers=head, verify=False)
        json_data = response.json()
        result = json_data
        code_status = result['msg']
        if code_status == '您的操作过于频繁,请休息一下吧~':   # "too many requests" -> rotate to the next proxy
            sk = sk + 1
            proxies1 = {'https': 'http://' + proxy_pool_url[sk]}
            response1 = requests.post(url=url2, allow_redirects=False, proxies=proxies1,
                                      data=param, headers=head, verify=False)
            json_data1 = response1.json()
            result2 = json_data1
        else:
            result2 = json_data
        print(proxies)
        print(result2)
        addert = []
        try:
            companyName = result2['data']['companyName']
            addert.append(companyName)
        except (IndexError, KeyError):
            companyName = ' '
            addert.append(companyName)
        try:
            repairTypeName = result2['data']['repairTypeName']
            addert.append(repairTypeName)
        except (IndexError, KeyError):
            repairTypeName = ' '
            addert.append(repairTypeName)
        try:
            contacts = result2['data']['contacts']
            addert.append(contacts)
        except (IndexError, KeyError):
            contacts = ' '
            addert.append(contacts)
        try:
            mobile = result2['data']['mobile']
            addert.append(mobile)
        except (IndexError, KeyError):
            mobile = ' '
            addert.append(mobile)
        try:
            workDescribe = result2['data']['workDescribe'].strip('\n')
            addert.append(workDescribe)
        except (IndexError, KeyError):
            workDescribe = ' '
            addert.append(workDescribe)
        try:
            address = result2['data']['address']
            addert.append(address)
        except (IndexError, KeyError):
            address = ' '
            addert.append(address)
        try:
            location = result2['data']['location']
            addert.append(location)
        except (IndexError, KeyError):
            location = ' '
            addert.append(location)
        datapage.append(addert)
        time.sleep(1)
    dbpath = pymysql.connect(host='192.168.1.202', port=3306, user='root',
                             password='Password@123', database='wxzhan')
    saveData(datapage, dbpath)


# Create the table
def init_db(dbpath):
    c = dbpath.cursor()           # get a cursor
    sql = '''
        CREATE TABLE `weixiuz`
        (
            id         int unsigned not null auto_increment primary key,
            `company`  mediumtext NULL,
            `type`     mediumtext NULL,
            `contact`  mediumtext NULL,
            `mobile`   mediumtext NULL,
            `describe` longtext   NULL,
            `address`  mediumtext NULL,
            `location` longtext   NULL
        )
    '''
    dbpath.ping(reconnect=True)
    c.execute(sql)                # run the SQL
    dbpath.commit()               # commit
    dbpath.close()                # close the connection; later calls reopen it via ping(reconnect=True)


# Save the scraped rows
def saveData(datapage, dbpath):
    init_db(dbpath)
    cur = dbpath.cursor()
    for page in datapage:
        for index in range(len(page)):
            if len(page[index]) != 0:
                page[index] = '"' + str(page[index]) + '"'
            else:
                page[index] = '""'
        sql = '''insert into `weixiuz`
                 (company, `type`, contact, `mobile`, `describe`, `address`, `location`)
                 values (%s)''' % ",".join(page)
        print(sql)
        dbpath.ping(reconnect=True)
        cur.execute(sql)
        dbpath.commit()
    cur.close()
    dbpath.close()


if __name__ == '__main__':
    main()
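The insert above wraps every field in double quotes and splices the row straight into the SQL string, which breaks as soon as a company name or address contains a quote character. One safer variant (a sketch, not what the original post used), reusing the same weixiuz table and the init_db helper above but letting pymysql bind the values itself:

# Drop-in variant of saveData: same table and column order as above, but the
# values are passed as parameters so the driver handles quoting and escaping.
def saveData(datapage, dbpath):
    init_db(dbpath)
    cur = dbpath.cursor()
    sql = '''insert into `weixiuz`
             (company, `type`, contact, `mobile`, `describe`, `address`, `location`)
             values (%s, %s, %s, %s, %s, %s, %s)'''
    for page in datapage:
        dbpath.ping(reconnect=True)
        cur.execute(sql, [str(field) for field in page])
    dbpath.commit()
    cur.close()
    dbpath.close()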
