Mirror of https://github.com/python/cpython.git, synced 2025-09-27 02:39:58 +00:00
Some cleanup -- don't use splitfields/joinfields, standardize
indentation (tabs only), rationalize some code in urljoin...
This commit is contained in: parent 3bb1edb328, commit a25d7ddbf0
1 changed file with 16 additions and 18 deletions
@@ -6,7 +6,7 @@ UC Irvine, June 1995.
 
 # Standard/builtin Python modules
 import string
-from string import joinfields, splitfields, rfind
+from string import join, split, rfind
 
 # A classification of schemes ('' means apply by default)
 uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'wais', 'file',
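By the time of this commit, string.split and string.join accepted the same arguments as the longer splitfields/joinfields spellings, so the import rename is behavior-preserving. A minimal sketch of the round trip, written with the modern str methods since the string-module functions no longer exist in Python 3:

# The old calls map onto today's str methods:
#   string.splitfields(s, sep)  -> s.split(sep)
#   string.joinfields(words, sep) -> sep.join(words)
path = 'a/b/c'
segments = path.split('/')        # ['a', 'b', 'c']
assert '/'.join(segments) == path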
@@ -37,9 +37,9 @@ MAX_CACHE_SIZE = 20
 _parse_cache = {}
 
 def clear_cache():
-    """Clear the parse cache."""
-    global _parse_cache
-    _parse_cache = {}
+	"""Clear the parse cache."""
+	global _parse_cache
+	_parse_cache = {}
 
 
 def urlparse(url, scheme = '', allow_fragments = 1):
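The signature in the last context line above still describes this function's modern descendant, urllib.parse.urlparse, which returns the same six fields (scheme, netloc, path, params, query, fragment). A quick usage sketch against the current standard library:

# Runs on Python 3; the 6-tuple layout matches what the urlparse in
# this diff builds up below.
from urllib.parse import urlparse

print(urlparse('http://host/path;params?query#frag'))
# ParseResult(scheme='http', netloc='host', path='/path',
#             params='params', query='query', fragment='frag')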
@@ -53,7 +53,7 @@ def urlparse(url, scheme = '', allow_fragments = 1):
-    if cached:
-        return cached
-    if len(_parse_cache) >= MAX_CACHE_SIZE:	# avoid runaway growth
-        clear_cache()
-    find = string.find
-    netloc = path = params = query = fragment = ''
-    i = find(url, ':')
+	if cached:
+		return cached
+	if len(_parse_cache) >= MAX_CACHE_SIZE:	# avoid runaway growth
+		clear_cache()
+	find = string.find
+	netloc = path = params = query = fragment = ''
+	i = find(url, ':')
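Taken together, these two hunks show the whole caching scheme: a module-level dict keyed by URL, a size check against MAX_CACHE_SIZE, and a wholesale reset instead of per-entry eviction. A minimal sketch of that pattern, with a hypothetical stand-in for the real parsing work:

MAX_CACHE_SIZE = 20
_parse_cache = {}

def clear_cache():
    """Clear the parse cache."""
    global _parse_cache
    _parse_cache = {}

def cached_parse(url):
    # cached_parse is a stand-in name; the caching logic is the diff's.
    cached = _parse_cache.get(url)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE:  # avoid runaway growth
        clear_cache()
    result = tuple(url.partition(':'))  # placeholder for real parsing
    _parse_cache[url] = result
    return result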
@@ -151,10 +151,8 @@ def urljoin(base, url, allow_fragments = 1):
 	if not path:
 		return urlunparse((scheme, netloc, bpath,
 				   params, query or bquery, fragment))
-	i = rfind(bpath, '/')
-	if i >= 0:
-		path = bpath[:i] + '/' + path
-	segments = splitfields(path, '/')
+	segments = split(bpath, '/')[:-1] + split(path, '/')
+	# XXX The stuff below is bogus in various ways...
 	if segments[-1] == '.':
 		segments[-1] = ''
 	while '.' in segments:
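The replacement line builds the joined path in one step: drop the last segment of the base path, then append all segments of the relative path. The '.'/'..' cleanup that begins in this hunk's tail (and that the new XXX comment frets about) then normalizes the list. A simplified sketch of both stages; the real loop also special-cases a trailing '.' or '..':

# Stage 1: the diff's one-liner, on plain strings.
bpath, path = '/a/b/c.html', 'sub/d.html'
segments = bpath.split('/')[:-1] + path.split('/')
# ['', 'a', 'b', 'sub', 'd.html']

# Stage 2: simplified '.'/'..' normalization in the spirit of the
# surrounding code (not the exact historical loop).
while '.' in segments:
    segments.remove('.')
i = 1
while i < len(segments):
    if segments[i] == '..' and segments[i-1] not in ('', '..'):
        del segments[i-1:i+1]
        i = max(i - 1, 1)
    else:
        i = i + 1

print('/'.join(segments))  # -> /a/b/sub/d.html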
@@ -173,19 +171,19 @@ def urljoin(base, url, allow_fragments = 1):
 			segments[-1] = ''
 		elif len(segments) >= 2 and segments[-1] == '..':
 			segments[-2:] = ['']
-	return urlunparse((scheme, netloc, joinfields(segments, '/'),
+	return urlunparse((scheme, netloc, join(segments, '/'),
 			   params, query, fragment))
 
 def urldefrag(url):
 	"""Removes any existing fragment from URL.
 
 	Returns a tuple of the defragmented URL and the fragment.  If
 	the URL contained no fragments, the second element is the
 	empty string.
 	"""
 	s, n, p, a, q, frag = urlparse(url)
 	defrag = urlunparse((s, n, p, a, q, ''))
 	return defrag, frag
 
 
 test_input = """
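urldefrag's contract carries into today's urllib.parse unchanged, except that the modern version returns a named two-tuple with exactly the fields the docstring above promises:

# Python 3 usage of the modern equivalent.
from urllib.parse import urldefrag

print(urldefrag('http://example.com/doc.html#sec2'))
# DefragResult(url='http://example.com/doc.html', fragment='sec2')
print(urldefrag('http://example.com/doc.html'))
# DefragResult(url='http://example.com/doc.html', fragment='')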