Mirror of https://github.com/python/cpython.git (synced 2025-09-26 18:29:57 +00:00)
make sure the crawler can browse file-based indexes under win32
commit cc243cc808
parent 76ad4f0ec9
2 changed files with 25 additions and 7 deletions
--- a/Lib/packaging/pypi/simple.py
+++ b/Lib/packaging/pypi/simple.py
@@ -123,8 +123,14 @@ class Crawler(BaseClient):
         self.follow_externals = follow_externals
 
         # mirroring attributes.
-        if not index_url.endswith("/"):
-            index_url += "/"
+        parsed = urllib.parse.urlparse(index_url)
+        self.scheme = parsed[0]
+        if self.scheme == 'file':
+            ender = os.path.sep
+        else:
+            ender = '/'
+        if not index_url.endswith(ender):
+            index_url += ender
         # if no mirrors are defined, use the method described in PEP 381.
         if mirrors is None:
             mirrors = get_mirrors(mirrors_url)
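A minimal standalone sketch of the scheme/separator logic this hunk introduces; the index URLs below are made-up examples, not taken from the patch:

    import os
    import urllib.parse

    for index_url in ("https://pypi.example.org/simple", "file://C:\\pypi\\simple"):
        parsed = urllib.parse.urlparse(index_url)
        scheme = parsed[0]  # same value as parsed.scheme
        # file-based indexes use the platform separator; everything else uses '/'
        ender = os.path.sep if scheme == 'file' else '/'
        if not index_url.endswith(ender):
            index_url += ender
        print(scheme, index_url)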
@@ -376,7 +382,11 @@ class Crawler(BaseClient):
         :param name: the name of the project to find the page
         """
         # Browse and index the content of the given PyPI page.
-        url = self.index_url + name + "/"
+        if self.scheme == 'file':
+            ender = os.path.sep
+        else:
+            ender = '/'
+        url = self.index_url + name + ender
         self._process_url(url, name)
 
     @socket_timeout()
@@ -395,7 +405,7 @@ class Crawler(BaseClient):
 
         # add index.html automatically for filesystem paths
         if scheme == 'file':
-            if url.endswith('/'):
+            if url.endswith(os.path.sep):
                 url += "index.html"
 
         # add authorization headers if auth is provided
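A hedged sketch of the behaviour the hunk above adjusts: for file URLs, the trailing platform separator (not a literal '/') now marks a directory whose index.html should be fetched. The helper name and paths here are illustrative only:

    import os

    def add_index_page(url, scheme):
        # filesystem paths ending in the platform separator are directories;
        # point the request at their index.html instead
        if scheme == 'file' and url.endswith(os.path.sep):
            url += "index.html"
        return url

    # on POSIX (os.path.sep == '/'):
    # add_index_page("file:///srv/pypi/simple/", "file")
    #   -> "file:///srv/pypi/simple/index.html"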
--- a/Lib/packaging/tests/test_pypi_simple.py
+++ b/Lib/packaging/tests/test_pypi_simple.py
@@ -1,5 +1,5 @@
"""Tests for the packaging.pypi.simple module."""
|
"""Tests for the packaging.pypi.simple module."""
|
||||||
|
import re
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import http.client
|
import http.client
|
||||||
|
@@ -277,8 +277,16 @@ class SimpleCrawlerTestCase(TempdirManager,
 
     def test_browse_local_files(self):
         # Test that we can browse local files"""
-        index_path = os.sep.join(["file://" + PYPI_DEFAULT_STATIC_PATH,
-                                  "test_found_links", "simple"])
+        index_url = "file://" + PYPI_DEFAULT_STATIC_PATH
+        if sys.platform == 'win32':
+            # under windows the correct syntax is:
+            # file:///C|\the\path\here
+            # instead of
+            # file://C:\the\path\here
+            fix = re.compile(r'^(file://)([A-Za-z])(:)')
+            index_url = fix.sub('\\1/\\2|', index_url)
+
+        index_path = os.sep.join([index_url, "test_found_links", "simple"])
         crawler = Crawler(index_path)
         dists = crawler.get_releases("foobar")
         self.assertEqual(4, len(dists))
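The win32 URL fix-up in this test can be exercised on its own; a small sketch using a made-up path, following directly from the re calls above:

    import re

    fix = re.compile(r'^(file://)([A-Za-z])(:)')
    url = "file://C:\\the\\path\\here"
    print(fix.sub('\\1/\\2|', url))  # file:///C|\the\path\here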