More old urllib usage.

This commit is contained in:
Georg Brandl 2008-06-23 11:45:20 +00:00
parent 029986af24
commit 7d84055e25
4 changed files with 19 additions and 19 deletions

View file

@@ -138,8 +138,8 @@ def load_my_cookie():
         value = cookies[COOKIE_NAME]
     except KeyError:
         return {}
-    import urllib
-    value = urllib.unquote(value)
+    import urllib.parse
+    value = urllib.parse.unquote(value)
     words = value.split('/')
     while len(words) < 3:
         words.append('')
@@ -153,8 +153,8 @@ def load_my_cookie():
 def send_my_cookie(ui):
     name = COOKIE_NAME
     value = "%s/%s/%s" % (ui.author, ui.email, ui.password)
-    import urllib
-    value = urllib.quote(value)
+    import urllib.parse
+    value = urllib.parse.quote(value)
     then = now + COOKIE_LIFETIME
     gmt = time.gmtime(then)
     path = os.environ.get('SCRIPT_NAME', '/cgi-bin/')

View file

@@ -1,5 +1,5 @@
 """pyversioncheck - Module to help with checking versions"""
-import urllib
+import urllib.request
 import email
 import sys
@@ -47,7 +47,7 @@ def _check1version(package, url, version, verbose=0):
     if verbose >= VERBOSE_EACHFILE:
         print(' Checking %s'%url)
     try:
-        fp = urllib.urlopen(url)
+        fp = urllib.request.urlopen(url)
     except IOError as arg:
         if verbose >= VERBOSE_EACHFILE:
             print(' Cannot open:', arg)

View file

@@ -113,13 +113,13 @@ import io
 import getopt
 import pickle
-import urllib
-import urlparse
+import urllib.request
+import urllib.parse as urlparse
 import sgmllib
 import cgi
 import mimetypes
-import robotparser
+from urllib import robotparser

 # Extract real version number if necessary
 if __version__[0] == '$':
@@ -487,7 +487,7 @@ class Checker:
         if url in self.name_table:
             return self.name_table[url]
-        scheme, path = urllib.splittype(url)
+        scheme, path = urllib.request.splittype(url)
         if scheme in ('mailto', 'news', 'javascript', 'telnet'):
             self.note(1, " Not checking %s URL" % scheme)
             return None
@@ -733,13 +733,13 @@ class MyStringIO(io.StringIO):
         return self.__url

-class MyURLopener(urllib.FancyURLopener):
+class MyURLopener(urllib.request.FancyURLopener):

-    http_error_default = urllib.URLopener.http_error_default
+    http_error_default = urllib.request.URLopener.http_error_default

     def __init__(*args):
         self = args[0]
-        urllib.FancyURLopener.__init__(*args)
+        urllib.request.FancyURLopener.__init__(*args)
         self.addheaders = [
             ('User-agent', 'Python-webchecker/%s' % __version__),
             ]
@@ -769,7 +769,7 @@ class MyURLopener(urllib.FancyURLopener):
             s.write('<A HREF="%s">%s</A>\n' % (q, q))
             s.seek(0)
             return s
-        return urllib.FancyURLopener.open_file(self, url)
+        return urllib.request.FancyURLopener.open_file(self, url)

 class MyHTMLParser(sgmllib.SGMLParser):

View file

@@ -6,8 +6,8 @@ __version__ = "$Revision$"
 import os
 import sys
-import urllib
 import getopt
+import urllib.parse

 import webchecker
@@ -87,11 +87,11 @@ class Sucker(webchecker.Checker):
         self.message("didn't save %s: %s", path, str(msg))

     def savefilename(self, url):
-        type, rest = urllib.splittype(url)
-        host, path = urllib.splithost(rest)
+        type, rest = urllib.parse.splittype(url)
+        host, path = urllib.parse.splithost(rest)
         path = path.lstrip("/")
-        user, host = urllib.splituser(host)
-        host, port = urllib.splitnport(host)
+        user, host = urllib.parse.splituser(host)
+        host, port = urllib.parse.splitnport(host)
         host = host.lower()
         if not path or path[-1] == "/":
             path = path + "index.html"