{"id":1075047,"date":"2025-01-08T11:40:16","date_gmt":"2025-01-08T03:40:16","guid":{"rendered":"https:\/\/docs.pingcode.com\/ask\/ask-ask\/1075047.html"},"modified":"2025-01-08T11:40:18","modified_gmt":"2025-01-08T03:40:18","slug":"%e5%a6%82%e4%bd%95%e7%94%a8python%e6%89%b9%e9%87%8f%e4%b8%8b%e8%bd%bd%e5%9b%be%e7%89%87%e5%a4%a7%e5%b0%8f-2","status":"publish","type":"post","link":"https:\/\/docs.pingcode.com\/ask\/1075047.html","title":{"rendered":"\u5982\u4f55\u7528python\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u5927\u5c0f"},"content":{"rendered":"<p style=\"text-align:center;\" ><img decoding=\"async\" src=\"https:\/\/cdn-kb.worktile.com\/kb\/wp-content\/uploads\/2024\/04\/25104511\/3be9ac6f-44c1-435b-801b-a4ecfcfa19eb.webp\" alt=\"\u5982\u4f55\u7528python\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u5927\u5c0f\" \/><\/p>\n<p><p> <strong>\u7528Python\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u7684\u65b9\u6cd5\u6709\u5f88\u591a\uff0c\u5e38\u7528\u7684\u65b9\u6cd5\u5305\u62ec\u4f7f\u7528requests\u3001BeautifulSoup\u548curllib\u5e93\u3002<\/strong> 
\u5728\u672c\u6587\u4e2d\uff0c\u6211\u4eec\u5c06\u8be6\u7ec6\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u8fd9\u4e9b\u5e93\u6765\u5b9e\u73b0\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u7684\u529f\u80fd\uff0c\u5e76\u63a2\u8ba8\u5176\u4e2d\u7684\u4f18\u7f3a\u70b9\uff0c\u786e\u4fdd\u60a8\u80fd\u591f\u9ad8\u6548\u5730\u7ba1\u7406\u56fe\u7247\u4e0b\u8f7d\u4efb\u52a1\u3002<\/p>\n<\/p>\n<p><h3>\u4e00\u3001\u4f7f\u7528requests\u5e93\u548cBeautifulSoup\u5e93<\/h3>\n<\/p>\n<p><p>requests\u5e93\u548cBeautifulSoup\u5e93\u662fPython\u4e2d\u975e\u5e38\u6d41\u884c\u7684\u4e24\u4e2a\u5e93\uff0crequests\u5e93\u7528\u4e8e\u53d1\u9001HTTP\u8bf7\u6c42\uff0cBeautifulSoup\u5e93\u7528\u4e8e\u89e3\u6790HTML\u548cXML\u6587\u6863\u3002\u901a\u8fc7\u7ed3\u5408\u4f7f\u7528\u8fd9\u4e24\u4e2a\u5e93\uff0c\u6211\u4eec\u53ef\u4ee5\u65b9\u4fbf\u5730\u4ece\u7f51\u9875\u4e2d\u63d0\u53d6\u56fe\u7247\u7684URL\uff0c\u5e76\u4e0b\u8f7d\u8fd9\u4e9b\u56fe\u7247\u3002<\/p>\n<\/p>\n<p><h4>1\u3001\u5b89\u88c5\u5fc5\u8981\u7684\u5e93<\/h4>\n<\/p>\n<p><p>\u9996\u5148\uff0c\u6211\u4eec\u9700\u8981\u5b89\u88c5requests\u548cBeautifulSoup\u5e93\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-bash\">pip install requests<\/p>\n<p>pip install beautifulsoup4<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>2\u3001\u63d0\u53d6\u56fe\u7247URL<\/h4>\n<\/p>\n<p><p>\u63a5\u4e0b\u6765\uff0c\u6211\u4eec\u7f16\u5199\u4e00\u4e2a\u51fd\u6570\uff0c\u4ece\u7f51\u9875\u4e2d\u63d0\u53d6\u6240\u6709\u56fe\u7247\u7684URL\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import requests<\/p>\n<p>from bs4 import BeautifulSoup<\/p>\n<p>def get_image_urls(url):<\/p>\n<p>    response = requests.get(url)<\/p>\n<p>    soup = BeautifulSoup(response.text, &#39;html.parser&#39;)<\/p>\n<p>    image_urls = []<\/p>\n<p>    for img_tag in soup.find_all(&#39;img&#39;):<\/p>\n<p>        img_url = img_tag.get(&#39;src&#39;)<\/p>\n<p>        if img_url:<\/p>\n<p>            image_urls.append(img_url)<\/p>\n<p>    return 
image_urls<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>3\u3001\u4e0b\u8f7d\u56fe\u7247<\/h4>\n<\/p>\n<p><p>\u7136\u540e\uff0c\u6211\u4eec\u7f16\u5199\u4e00\u4e2a\u51fd\u6570\u6765\u4e0b\u8f7d\u56fe\u7247\uff0c\u5e76\u4fdd\u5b58\u5230\u672c\u5730\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import os<\/p>\n<p>def download_images(image_urls, save_dir):<\/p>\n<p>    if not os.path.exists(save_dir):<\/p>\n<p>        os.makedirs(save_dir)<\/p>\n<p>    for img_url in image_urls:<\/p>\n<p>        img_data = requests.get(img_url).content<\/p>\n<p>        img_name = os.path.join(save_dir, img_url.split(&#39;\/&#39;)[-1])<\/p>\n<p>        with open(img_name, &#39;wb&#39;) as img_file:<\/p>\n<p>            img_file.write(img_data)<\/p>\n<p>        print(f&quot;Downloaded {img_name}&quot;)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>4\u3001\u7ed3\u5408\u4f7f\u7528<\/h4>\n<\/p>\n<p><p>\u6700\u540e\uff0c\u6211\u4eec\u7ed3\u5408\u4e0a\u8ff0\u4e24\u4e2a\u51fd\u6570\uff0c\u5b9e\u73b0\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">def main(url, save_dir):<\/p>\n<p>    image_urls = get_image_urls(url)<\/p>\n<p>    download_images(image_urls, save_dir)<\/p>\n<p>if __name__ == &quot;__main__&quot;:<\/p>\n<p>    url = &quot;https:\/\/example.com&quot;<\/p>\n<p>    save_dir = &quot;.\/images&quot;<\/p>\n<p>    main(url, 
save_dir)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>\u4e8c\u3001\u4f7f\u7528urllib\u5e93<\/h3>\n<\/p>\n<p><p>urllib\u5e93\u662fPython\u6807\u51c6\u5e93\u4e2d\u7528\u4e8e\u5904\u7406URL\u7684\u6a21\u5757\u3002\u6211\u4eec\u540c\u6837\u53ef\u4ee5\u4f7f\u7528urllib\u5e93\u6765\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u3002<\/p>\n<\/p>\n<p><h4>1\u3001\u63d0\u53d6\u56fe\u7247URL<\/h4>\n<\/p>\n<p><p>\u9996\u5148\uff0c\u6211\u4eec\u7f16\u5199\u4e00\u4e2a\u51fd\u6570\uff0c\u4ece\u7f51\u9875\u4e2d\u63d0\u53d6\u6240\u6709\u56fe\u7247\u7684URL\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import urllib.request<\/p>\n<p>from bs4 import BeautifulSoup<\/p>\n<p>def get_image_urls(url):<\/p>\n<p>    response = urllib.request.urlopen(url)<\/p>\n<p>    soup = BeautifulSoup(response, &#39;html.parser&#39;)<\/p>\n<p>    image_urls = []<\/p>\n<p>    for img_tag in soup.find_all(&#39;img&#39;):<\/p>\n<p>        img_url = img_tag.get(&#39;src&#39;)<\/p>\n<p>        if img_url:<\/p>\n<p>            image_urls.append(img_url)<\/p>\n<p>    return image_urls<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>2\u3001\u4e0b\u8f7d\u56fe\u7247<\/h4>\n<\/p>\n<p><p>\u7136\u540e\uff0c\u6211\u4eec\u7f16\u5199\u4e00\u4e2a\u51fd\u6570\u6765\u4e0b\u8f7d\u56fe\u7247\uff0c\u5e76\u4fdd\u5b58\u5230\u672c\u5730\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import os<\/p>\n<p>def download_images(image_urls, save_dir):<\/p>\n<p>    if not os.path.exists(save_dir):<\/p>\n<p>        os.makedirs(save_dir)<\/p>\n<p>    for img_url in image_urls:<\/p>\n<p>        img_data = urllib.request.urlopen(img_url).read()<\/p>\n<p>        img_name = os.path.join(save_dir, img_url.split(&#39;\/&#39;)[-1])<\/p>\n<p>        with open(img_name, &#39;wb&#39;) as img_file:<\/p>\n<p>            img_file.write(img_data)<\/p>\n<p>        print(f&quot;Downloaded 
{img_name}&quot;)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>3\u3001\u7ed3\u5408\u4f7f\u7528<\/h4>\n<\/p>\n<p><p>\u6700\u540e\uff0c\u6211\u4eec\u7ed3\u5408\u4e0a\u8ff0\u4e24\u4e2a\u51fd\u6570\uff0c\u5b9e\u73b0\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">def main(url, save_dir):<\/p>\n<p>    image_urls = get_image_urls(url)<\/p>\n<p>    download_images(image_urls, save_dir)<\/p>\n<p>if __name__ == &quot;__main__&quot;:<\/p>\n<p>    url = &quot;https:\/\/example.com&quot;<\/p>\n<p>    save_dir = &quot;.\/images&quot;<\/p>\n<p>    main(url, save_dir)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>\u4e09\u3001\u5904\u7406\u56fe\u7247\u4e0b\u8f7d\u4e2d\u7684\u5f02\u5e38\u60c5\u51b5<\/h3>\n<\/p>\n<p><p>\u5728\u5b9e\u9645\u5e94\u7528\u4e2d\uff0c\u56fe\u7247\u4e0b\u8f7d\u8fc7\u7a0b\u4e2d\u53ef\u80fd\u4f1a\u9047\u5230\u5404\u79cd\u5f02\u5e38\u60c5\u51b5\uff0c\u4f8b\u5982\u7f51\u7edc\u95ee\u9898\u3001\u65e0\u6548\u7684URL\u7b49\u3002\u4e3a\u4e86\u63d0\u9ad8\u7a0b\u5e8f\u7684\u5065\u58ee\u6027\uff0c\u6211\u4eec\u53ef\u4ee5\u5728\u4e0b\u8f7d\u56fe\u7247\u65f6\u6dfb\u52a0\u5f02\u5e38\u5904\u7406\uff1a<\/p>\n<\/p>\n<p><pre><code class=\"language-python\">import requests<\/p>\n<p>from bs4 import BeautifulSoup<\/p>\n<p>import os<\/p>\n<p>def get_image_urls(url):<\/p>\n<p>    response = requests.get(url)<\/p>\n<p>    soup = BeautifulSoup(response.text, &#39;html.parser&#39;)<\/p>\n<p>    image_urls = []<\/p>\n<p>    for img_tag in soup.find_all(&#39;img&#39;):<\/p>\n<p>        img_url = img_tag.get(&#39;src&#39;)<\/p>\n<p>        if img_url:<\/p>\n<p>            image_urls.append(img_url)<\/p>\n<p>    return image_urls<\/p>\n<p>def download_images(image_urls, save_dir):<\/p>\n<p>    if not os.path.exists(save_dir):<\/p>\n<p>        os.makedirs(save_dir)<\/p>\n<p>    for img_url in image_urls:<\/p>\n<p>        try:<\/p>\n<p>            img_data = requests.get(img_url).content<\/p>\n<p>            img_name = os.path.join(save_dir, 
img_url.split(&#39;\/&#39;)[-1])<\/p>\n<p>            with open(img_name, &#39;wb&#39;) as img_file:<\/p>\n<p>                img_file.write(img_data)<\/p>\n<p>            print(f&quot;Downloaded {img_name}&quot;)<\/p>\n<p>        except Exception as e:<\/p>\n<p>            print(f&quot;Failed to download {img_url}: {e}&quot;)<\/p>\n<p>def main(url, save_dir):<\/p>\n<p>    image_urls = get_image_urls(url)<\/p>\n<p>    download_images(image_urls, save_dir)<\/p>\n<p>if __name__ == &quot;__main__&quot;:<\/p>\n<p>    url = &quot;https:\/\/example.com&quot;<\/p>\n<p>    save_dir = &quot;.\/images&quot;<\/p>\n<p>    main(url, save_dir)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>\u56db\u3001\u4f18\u5316\u56fe\u7247\u4e0b\u8f7d\u901f\u5ea6<\/h3>\n<\/p>\n<p><p>\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u65f6\uff0c\u901f\u5ea6\u53ef\u80fd\u4f1a\u53d7\u5230\u7f51\u7edc\u5e26\u5bbd\u548c\u670d\u52a1\u5668\u54cd\u5e94\u901f\u5ea6\u7684\u9650\u5236\u3002\u4e3a\u4e86\u63d0\u9ad8\u4e0b\u8f7d\u901f\u5ea6\uff0c\u6211\u4eec\u53ef\u4ee5\u4f7f\u7528\u591a\u7ebf\u7a0b\u6216\u5f02\u6b65\u7f16\u7a0b\u6765\u5e76\u884c\u4e0b\u8f7d\u56fe\u7247\u3002<\/p>\n<\/p>\n<p><h4>1\u3001\u4f7f\u7528\u591a\u7ebf\u7a0b<\/h4>\n<\/p>\n<p><pre><code class=\"language-python\">import requests<\/p>\n<p>from bs4 import BeautifulSoup<\/p>\n<p>import os<\/p>\n<p>import threading<\/p>\n<p>def get_image_urls(url):<\/p>\n<p>    response = requests.get(url)<\/p>\n<p>    soup = BeautifulSoup(response.text, &#39;html.parser&#39;)<\/p>\n<p>    image_urls = []<\/p>\n<p>    for img_tag in soup.find_all(&#39;img&#39;):<\/p>\n<p>        img_url = img_tag.get(&#39;src&#39;)<\/p>\n<p>        if img_url:<\/p>\n<p>            image_urls.append(img_url)<\/p>\n<p>    return image_urls<\/p>\n<p>def download_image(img_url, save_dir):<\/p>\n<p>    try:<\/p>\n<p>        img_data = requests.get(img_url).content<\/p>\n<p>        img_name = os.path.join(save_dir, img_url.split(&#39;\/&#39;)[-1])<\/p>\n<p>        with open(img_name, &#39;wb&#39;) 
as img_file:<\/p>\n<p>            img_file.write(img_data)<\/p>\n<p>        print(f&quot;Downloaded {img_name}&quot;)<\/p>\n<p>    except Exception as e:<\/p>\n<p>        print(f&quot;Failed to download {img_url}: {e}&quot;)<\/p>\n<p>def download_images(image_urls, save_dir):<\/p>\n<p>    if not os.path.exists(save_dir):<\/p>\n<p>        os.makedirs(save_dir)<\/p>\n<p>    threads = []<\/p>\n<p>    for img_url in image_urls:<\/p>\n<p>        thread = threading.Thread(target=download_image, args=(img_url, save_dir))<\/p>\n<p>        thread.start()<\/p>\n<p>        threads.append(thread)<\/p>\n<p>    for thread in threads:<\/p>\n<p>        thread.join()<\/p>\n<p>def main(url, save_dir):<\/p>\n<p>    image_urls = get_image_urls(url)<\/p>\n<p>    download_images(image_urls, save_dir)<\/p>\n<p>if __name__ == &quot;__main__&quot;:<\/p>\n<p>    url = &quot;https:\/\/example.com&quot;<\/p>\n<p>    save_dir = &quot;.\/images&quot;<\/p>\n<p>    main(url, save_dir)<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h4>2\u3001\u4f7f\u7528\u5f02\u6b65\u7f16\u7a0b<\/h4>\n<\/p>\n<p><pre><code class=\"language-python\">import aiohttp<\/p>\n<p>import asyncio<\/p>\n<p>from bs4 import BeautifulSoup<\/p>\n<p>import os<\/p>\n<p>async def fetch(session, url):<\/p>\n<p>    async with session.get(url) as response:<\/p>\n<p>        return await response.read()<\/p>\n<p>async def get_image_urls(url):<\/p>\n<p>    async with aiohttp.ClientSession() as session:<\/p>\n<p>        response = await fetch(session, url)<\/p>\n<p>        soup = BeautifulSoup(response, &#39;html.parser&#39;)<\/p>\n<p>        image_urls = []<\/p>\n<p>        for img_tag in soup.find_all(&#39;img&#39;):<\/p>\n<p>            img_url = img_tag.get(&#39;src&#39;)<\/p>\n<p>            if img_url:<\/p>\n<p>                image_urls.append(img_url)<\/p>\n<p>        return image_urls<\/p>\n<p>async def download_image(session, img_url, save_dir):<\/p>\n<p>    try:<\/p>\n<p>        img_data = await fetch(session, img_url)<\/p>\n<p>        
img_name = os.path.join(save_dir, img_url.split(&#39;\/&#39;)[-1])<\/p>\n<p>        with open(img_name, &#39;wb&#39;) as img_file:<\/p>\n<p>            img_file.write(img_data)<\/p>\n<p>        print(f&quot;Downloaded {img_name}&quot;)<\/p>\n<p>    except Exception as e:<\/p>\n<p>        print(f&quot;Failed to download {img_url}: {e}&quot;)<\/p>\n<p>async def download_images(image_urls, save_dir):<\/p>\n<p>    if not os.path.exists(save_dir):<\/p>\n<p>        os.makedirs(save_dir)<\/p>\n<p>    async with aiohttp.ClientSession() as session:<\/p>\n<p>        tasks = []<\/p>\n<p>        for img_url in image_urls:<\/p>\n<p>            task = download_image(session, img_url, save_dir)<\/p>\n<p>            tasks.append(task)<\/p>\n<p>        await asyncio.gather(*tasks)<\/p>\n<p>async def main(url, save_dir):<\/p>\n<p>    image_urls = await get_image_urls(url)<\/p>\n<p>    await download_images(image_urls, save_dir)<\/p>\n<p>if __name__ == &quot;__main__&quot;:<\/p>\n<p>    url = &quot;https:\/\/example.com&quot;<\/p>\n<p>    save_dir = &quot;.\/images&quot;<\/p>\n<p>    asyncio.run(main(url, 
save_dir))<\/p>\n<p><\/code><\/pre>\n<\/p>\n<p><h3>\u4e94\u3001\u603b\u7ed3<\/h3>\n<\/p>\n<p><p>\u901a\u8fc7\u4e0a\u8ff0\u65b9\u6cd5\uff0c\u6211\u4eec\u53ef\u4ee5\u4f7f\u7528Python\u5b9e\u73b0\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u7684\u529f\u80fd\u3002<strong>\u4f7f\u7528requests\u5e93\u548cBeautifulSoup\u5e93\u53ef\u4ee5\u65b9\u4fbf\u5730\u63d0\u53d6\u56fe\u7247URL\uff0c\u5e76\u4e0b\u8f7d\u56fe\u7247<\/strong>\uff0c\u540c\u65f6\u53ef\u4ee5\u6dfb\u52a0\u5f02\u5e38\u5904\u7406\uff0c\u63d0\u9ad8\u7a0b\u5e8f\u7684\u5065\u58ee\u6027\u3002\u6b64\u5916\uff0c<strong>\u901a\u8fc7\u4f7f\u7528\u591a\u7ebf\u7a0b\u6216\u5f02\u6b65\u7f16\u7a0b\uff0c\u53ef\u4ee5\u663e\u8457\u63d0\u9ad8\u56fe\u7247\u4e0b\u8f7d\u901f\u5ea6<\/strong>\u3002\u5e0c\u671b\u672c\u6587\u5bf9\u60a8\u6709\u6240\u5e2e\u52a9\uff0c\u80fd\u591f\u5e2e\u52a9\u60a8\u9ad8\u6548\u5730\u7ba1\u7406\u56fe\u7247\u4e0b\u8f7d\u4efb\u52a1\u3002<\/p>\n<\/p>\n<h2><strong>\u76f8\u5173\u95ee\u7b54FAQs\uff1a<\/strong><\/h2>\n<p> <strong>\u5982\u4f55\u9009\u62e9\u5408\u9002\u7684Python\u5e93\u6765\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\uff1f<\/strong><br \/>\u5728Python\u4e2d\uff0c\u6709\u51e0\u4e2a\u6d41\u884c\u7684\u5e93\u53ef\u4ee5\u5e2e\u52a9\u4f60\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\uff0c\u6bd4\u5982<code>requests<\/code>\u548c<code>BeautifulSoup<\/code>\u3002<code>requests<\/code>\u5e93\u7528\u4e8e\u53d1\u9001\u7f51\u7edc\u8bf7\u6c42\uff0c\u800c<code>BeautifulSoup<\/code>\u5219\u7528\u4e8e\u89e3\u6790HTML\u9875\u9762\uff0c\u63d0\u53d6\u56fe\u7247\u94fe\u63a5\u3002<code>Scrapy<\/code>\u662f\u53e6\u4e00\u4e2a\u529f\u80fd\u5f3a\u5927\u7684\u6846\u67b6\uff0c\u9002\u5408\u9700\u8981\u5904\u7406\u5927\u91cf\u6570\u636e\u6216\u590d\u6742\u7f51\u7ad9\u7684\u60c5\u51b5\u3002\u9009\u62e9\u5408\u9002\u7684\u5e93\u53d6\u51b3\u4e8e\u4f60\u7684\u9700\u6c42\u548c\u9879\u76ee\u7684\u590d\u6742\u6027\u3002<\/p>\n<p><strong>\u5982\u4f55\u786e\u4fdd\u4e0b\u8f7d\u7684\u56fe\u7247\u8d28\u91cf\u4e0d\u53d7\u5f71\u54cd\uff1f<\/strong><br 
\/>\u4e0b\u8f7d\u56fe\u7247\u65f6\uff0c\u786e\u4fdd\u4f7f\u7528\u9ad8\u8d28\u91cf\u7684\u56fe\u7247\u94fe\u63a5\u975e\u5e38\u91cd\u8981\u3002\u4f60\u53ef\u4ee5\u901a\u8fc7\u68c0\u67e5\u56fe\u7247\u7684URL\uff0c\u901a\u5e38\u9ad8\u5206\u8fa8\u7387\u7684\u56fe\u7247\u94fe\u63a5\u4f1a\u5305\u542b\u201clarge\u201d\u6216\u201chighres\u201d\u7b49\u5173\u952e\u8bcd\u3002\u6b64\u5916\uff0c\u5c3d\u91cf\u907f\u514d\u76f4\u63a5\u4ece\u7f29\u7565\u56fe\u94fe\u63a5\u4e0b\u8f7d\uff0c\u800c\u5e94\u8be5\u83b7\u53d6\u539f\u59cb\u56fe\u50cf\u7684\u94fe\u63a5\uff0c\u4ee5\u4fdd\u8bc1\u4e0b\u8f7d\u7684\u56fe\u7247\u8d28\u91cf\u7b26\u5408\u8981\u6c42\u3002<\/p>\n<p><strong>\u5982\u4f55\u5904\u7406\u4e0b\u8f7d\u8fc7\u7a0b\u4e2d\u53ef\u80fd\u51fa\u73b0\u7684\u9519\u8bef\uff1f<\/strong><br \/>\u5728\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u65f6\uff0c\u7f51\u7edc\u8fde\u63a5\u95ee\u9898\u3001URL\u9519\u8bef\u6216\u6587\u4ef6\u6743\u9650\u95ee\u9898\u53ef\u80fd\u5bfc\u81f4\u4e0b\u8f7d\u5931\u8d25\u3002\u4f7f\u7528<code>try-except<\/code>\u8bed\u53e5\u53ef\u4ee5\u6709\u6548\u6355\u83b7\u8fd9\u4e9b\u9519\u8bef\uff0c\u5e76\u91c7\u53d6\u76f8\u5e94\u7684\u63aa\u65bd\uff0c\u6bd4\u5982\u91cd\u8bd5\u4e0b\u8f7d\u6216\u8bb0\u5f55\u9519\u8bef\u65e5\u5fd7\u3002\u6b64\u5916\uff0c\u8bbe\u7f6e\u9002\u5f53\u7684\u8d85\u65f6\u65f6\u95f4\u548c\u91cd\u8bd5\u673a\u5236\u53ef\u4ee5\u63d0\u9ad8\u4e0b\u8f7d\u7684\u7a33\u5b9a\u6027\uff0c\u786e\u4fdd\u5c3d\u53ef\u80fd\u591a\u7684\u56fe\u7247\u6210\u529f\u4e0b\u8f7d\u3002<\/p>\n","protected":false},"excerpt":{"rendered":"\u7528Python\u6279\u91cf\u4e0b\u8f7d\u56fe\u7247\u7684\u65b9\u6cd5\u6709\u5f88\u591a\uff0c\u5e38\u7528\u7684\u65b9\u6cd5\u5305\u62ec\u4f7f\u7528requests\u3001BeautifulSoup\u548curl 
[&hellip;]","protected":false},"author":3,"featured_media":1075050,"comment_status":"closed","ping_status":"","sticky":false,"template":"","format":"standard","meta":{"_acf_changed":false,"footnotes":""},"categories":[37],"tags":[],"acf":[],"_links":{"self":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts\/1075047"}],"collection":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/users\/3"}],"replies":[{"embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/comments?post=1075047"}],"version-history":[{"count":"1","href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts\/1075047\/revisions"}],"predecessor-version":[{"id":1075051,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/posts\/1075047\/revisions\/1075051"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/media\/1075050"}],"wp:attachment":[{"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/media?parent=1075047"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/categories?post=1075047"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/docs.pingcode.com\/wp-json\/wp\/v2\/tags?post=1075047"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}