feat(ST2.UtilPackages): bump up all packages

- Refresh PackageCache with latest versions of everything
Iristyle
2013-09-16 22:35:46 -04:00
parent 7195197f0f
commit a000ce8acc
451 changed files with 14151 additions and 8317 deletions

downloaders/__init__.py

@@ -0,0 +1,11 @@
import os
if os.name == 'nt':
from .wininet_downloader import WinINetDownloader
DOWNLOADERS = [WinINetDownloader]
else:
from .urllib_downloader import UrlLibDownloader
from .curl_downloader import CurlDownloader
from .wget_downloader import WgetDownloader
DOWNLOADERS = [UrlLibDownloader, CurlDownloader, WgetDownloader]
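
For orientation, a minimal sketch of how a caller might pick one of the platform-appropriate DOWNLOADERS. The helper name, the fallback behaviour and the settings handling are illustrative assumptions, not part of this commit:

from .binary_not_found_error import BinaryNotFoundError

def best_downloader(settings, need_https=False):
    # Walk the platform-specific list in order of preference, skipping
    # any downloader whose backing binary is missing from PATH or that
    # cannot handle HTTPS when the caller needs it
    for downloader_class in DOWNLOADERS:
        try:
            downloader = downloader_class(settings)
        except (BinaryNotFoundError):
            continue
        if need_https and not downloader.supports_ssl():
            continue
        return downloader
    return None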

downloaders/background_downloader.py

@@ -0,0 +1,62 @@
import threading
class BackgroundDownloader(threading.Thread):
"""
Downloads information from one or more URLs in the background.
Normal usage is to use one BackgroundDownloader per domain name.
:param settings:
A dict containing at least the following fields:
`cache_length`,
`debug`,
`timeout`,
`user_agent`,
`http_proxy`,
`https_proxy`,
`proxy_username`,
`proxy_password`
:param providers:
An array of providers that can download the URLs
"""
def __init__(self, settings, providers):
self.settings = settings
self.urls = []
self.providers = providers
self.used_providers = {}
threading.Thread.__init__(self)
def add_url(self, url):
"""
Adds a URL to the list to download
:param url:
The URL to download info about
"""
self.urls.append(url)
def get_provider(self, url):
"""
Returns the provider for the URL specified
:param url:
The URL to return the provider for
:return:
The provider object for the URL
"""
return self.used_providers[url]
def run(self):
for url in self.urls:
for provider_class in self.providers:
if provider_class.match_url(url):
provider = provider_class(url, self.settings)
break
provider.prefetch()
self.used_providers[url] = provider
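
Hypothetical usage of BackgroundDownloader, assuming a provider class that exposes the match_url()/prefetch() interface relied on in run(); the provider import path and the URL are illustrative:

# from ..providers.github_repository_provider import GitHubRepositoryProvider  # assumed path

settings = {'debug': False, 'timeout': 30, 'user_agent': 'Package Control',
    'http_proxy': None, 'https_proxy': None,
    'proxy_username': None, 'proxy_password': None, 'cache_length': 300}

downloader = BackgroundDownloader(settings, [GitHubRepositoryProvider])
downloader.add_url('https://github.com/wbond/sublime_alignment')
downloader.start()    # run() prefetches each URL on the background thread
downloader.join()     # block until the prefetching completes
provider = downloader.get_provider('https://github.com/wbond/sublime_alignment')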

downloaders/binary_not_found_error.py

@@ -0,0 +1,4 @@
class BinaryNotFoundError(Exception):
"""If a necessary executable is not found in the PATH on the system"""
pass

downloaders/caching_downloader.py

@@ -0,0 +1,185 @@
import sys
import re
import json
import hashlib
from ..console_write import console_write
class CachingDownloader(object):
"""
A base downloader that will use a caching backend to cache HTTP requests
and make conditional requests.
"""
def add_conditional_headers(self, url, headers):
"""
Add `If-Modified-Since` and `If-None-Match` headers to a request if a
cached copy exists
        :param url:
            The URL of the request

        :param headers:
            A dict with the request headers
:return:
The request headers dict, possibly with new headers added
"""
if not self.settings.get('cache'):
return headers
info_key = self.generate_key(url, '.info')
info_json = self.settings['cache'].get(info_key)
if not info_json:
return headers
# Make sure we have the cached content to use if we get a 304
key = self.generate_key(url)
if not self.settings['cache'].has(key):
return headers
try:
info = json.loads(info_json.decode('utf-8'))
except ValueError:
return headers
etag = info.get('etag')
if etag:
headers['If-None-Match'] = etag
last_modified = info.get('last-modified')
if last_modified:
headers['If-Modified-Since'] = last_modified
return headers
def cache_result(self, method, url, status, headers, content):
"""
Processes a request result, either caching the result, or returning
the cached version of the url.
:param method:
The HTTP method used for the request
:param url:
The url of the request
:param status:
The numeric response status of the request
:param headers:
            A dict of response headers, with keys being lowercase
:param content:
The response content
:return:
The response content
"""
debug = self.settings.get('debug', False)
if not self.settings.get('cache'):
if debug:
console_write(u"Skipping cache since there is no cache object", True)
return content
if method.lower() != 'get':
if debug:
console_write(u"Skipping cache since the HTTP method != GET", True)
return content
status = int(status)
# Don't do anything unless it was successful or not modified
if status not in [200, 304]:
if debug:
console_write(u"Skipping cache since the HTTP status code not one of: 200, 304", True)
return content
key = self.generate_key(url)
if status == 304:
cached_content = self.settings['cache'].get(key)
if cached_content:
if debug:
console_write(u"Using cached content for %s" % url, True)
return cached_content
# If we got a 304, but did not have the cached content
# stop here so we don't cache an empty response
return content
# If we got here, the status is 200
# Respect some basic cache control headers
cache_control = headers.get('cache-control', '')
if cache_control:
fields = re.split(',\s*', cache_control)
for field in fields:
if field == 'no-store':
return content
# Don't ever cache zip/binary files for the sake of hard drive space
if headers.get('content-type') in ['application/zip', 'application/octet-stream']:
if debug:
console_write(u"Skipping cache since the response is a zip file", True)
return content
etag = headers.get('etag')
last_modified = headers.get('last-modified')
if not etag and not last_modified:
return content
struct = {'etag': etag, 'last-modified': last_modified}
struct_json = json.dumps(struct, indent=4)
info_key = self.generate_key(url, '.info')
if debug:
console_write(u"Caching %s in %s" % (url, key), True)
self.settings['cache'].set(info_key, struct_json.encode('utf-8'))
self.settings['cache'].set(key, content)
return content
def generate_key(self, url, suffix=''):
"""
Generates a key to store the cache under
:param url:
The URL being cached
:param suffix:
A string to append to the key
:return:
A string key for the URL
"""
if sys.version_info >= (3,) or isinstance(url, unicode):
url = url.encode('utf-8')
key = hashlib.md5(url).hexdigest()
return key + suffix
def retrieve_cached(self, url):
"""
Tries to return the cached content for a URL
:param url:
The URL to get the cached content for
:return:
The cached content
"""
key = self.generate_key(url)
if not self.settings['cache'].has(key):
return False
if self.settings.get('debug'):
console_write(u"Using cached content for %s" % url, True)
return self.settings['cache'].get(key)
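
The settings['cache'] object this class relies on is only assumed to expose get(), set() and has(). A minimal in-memory stand-in, for illustration only (Package Control ships its own backend):

class DictCache(object):
    def __init__(self):
        self._data = {}

    def get(self, key):
        # None for a missing key matches the falsy checks
        # CachingDownloader performs on the result
        return self._data.get(key)

    def set(self, key, content):
        self._data[key] = content

    def has(self, key):
        return key in self._data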

downloaders/cert_provider.py

@@ -0,0 +1,203 @@
import os
import re
import json
import sublime
from ..console_write import console_write
from ..open_compat import open_compat, read_compat
from ..package_io import read_package_file
from ..cache import get_cache
from ..ca_certs import get_system_ca_bundle_path
from .no_ca_cert_exception import NoCaCertException
from .downloader_exception import DownloaderException
class CertProvider(object):
"""
A base downloader that provides access to a ca-bundle for validating
SSL certificates.
"""
def check_certs(self, domain, timeout):
"""
Ensures that the SSL CA cert for a domain is present on the machine
:param domain:
The domain to ensure there is a CA cert for
:param timeout:
The int timeout for downloading the CA cert from the channel
:raises:
NoCaCertException: when a suitable CA cert could not be found
:return:
The CA cert bundle path
"""
# Try to use the system CA bundle
ca_bundle_path = get_system_ca_bundle_path(self.settings)
if ca_bundle_path:
return ca_bundle_path
# If the system bundle did not work, fall back to our CA distribution
# system. Hopefully this will be going away soon.
if self.settings.get('debug'):
console_write(u'Unable to find system CA cert bundle, falling back to certs provided by Package Control')
cert_match = False
certs_list = get_cache('*.certs', self.settings.get('certs', {}))
ca_bundle_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-bundle')
if not os.path.exists(ca_bundle_path) or os.stat(ca_bundle_path).st_size == 0:
bundle_contents = read_package_file('Package Control', 'Package Control.ca-bundle', True)
if not bundle_contents:
raise NoCaCertException(u'Unable to copy distributed Package Control.ca-bundle', domain)
with open_compat(ca_bundle_path, 'wb') as f:
f.write(bundle_contents)
cert_info = certs_list.get(domain)
if cert_info:
cert_match = self.locate_cert(cert_info[0],
cert_info[1], domain, timeout)
wildcard_info = certs_list.get('*')
if wildcard_info:
cert_match = self.locate_cert(wildcard_info[0],
wildcard_info[1], domain, timeout) or cert_match
if not cert_match:
raise NoCaCertException(u'No CA certs available for %s' % domain, domain)
return ca_bundle_path
def locate_cert(self, cert_id, location, domain, timeout):
"""
Makes sure the SSL cert specified has been added to the CA cert
bundle that is present on the machine
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param location:
An http(s) URL, or absolute filesystem path to the CA cert(s)
:param domain:
The domain to ensure there is a CA cert for
:param timeout:
The int timeout for downloading the CA cert from the channel
:return:
If the cert specified (by cert_id) is present on the machine and
part of the Package Control.ca-bundle file in the User package folder
"""
ca_list_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-list')
if not os.path.exists(ca_list_path) or os.stat(ca_list_path).st_size == 0:
list_contents = read_package_file('Package Control', 'Package Control.ca-list')
if not list_contents:
raise NoCaCertException(u'Unable to copy distributed Package Control.ca-list', domain)
with open_compat(ca_list_path, 'w') as f:
f.write(list_contents)
ca_certs = []
with open_compat(ca_list_path, 'r') as f:
ca_certs = json.loads(read_compat(f))
        if cert_id not in ca_certs:
if str(location) != '':
if re.match('^https?://', location):
contents = self.download_cert(cert_id, location, domain,
timeout)
else:
contents = self.load_cert(cert_id, location, domain)
if contents:
self.save_cert(cert_id, contents)
return True
return False
return True
def download_cert(self, cert_id, url, domain, timeout):
"""
Downloads CA cert(s) from a URL
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param url:
An http(s) URL to the CA cert(s)
:param domain:
The domain to ensure there is a CA cert for
:param timeout:
The int timeout for downloading the CA cert from the channel
:return:
The contents of the CA cert(s)
"""
cert_downloader = self.__class__(self.settings)
if self.settings.get('debug'):
console_write(u"Downloading CA cert for %s from \"%s\"" % (domain, url), True)
return cert_downloader.download(url,
'Error downloading CA certs for %s.' % domain, timeout, 1)
def load_cert(self, cert_id, path, domain):
"""
Copies CA cert(s) from a file path
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param path:
The absolute filesystem path to a file containing the CA cert(s)
:param domain:
The domain name the cert is for
:return:
The contents of the CA cert(s)
"""
if os.path.exists(path):
if self.settings.get('debug'):
console_write(u"Copying CA cert for %s from \"%s\"" % (domain, path), True)
with open_compat(path, 'rb') as f:
return f.read()
else:
raise NoCaCertException(u"Unable to find CA cert for %s at \"%s\"" % (domain, path), domain)
def save_cert(self, cert_id, contents):
"""
Saves CA cert(s) to the Package Control.ca-bundle
:param cert_id:
The identifier for CA cert(s). For those provided by the channel
system, this will be an md5 of the contents of the cert(s). For
user-provided certs, this is something they provide.
:param contents:
The contents of the CA cert(s)
"""
ca_bundle_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-bundle')
with open_compat(ca_bundle_path, 'ab') as f:
f.write(b"\n" + contents)
ca_list_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.ca-list')
with open_compat(ca_list_path, 'r') as f:
ca_certs = json.loads(read_compat(f))
ca_certs.append(cert_id)
with open_compat(ca_list_path, 'w') as f:
f.write(json.dumps(ca_certs, indent=4))
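
For reference, the shape of the `certs` setting consulted by check_certs(): a dict mapping a domain name (or '*' for a wildcard entry) to a [cert_id, location] pair, where location is an http(s) URL, a filesystem path, or an empty string. The identifiers and URL below are made-up placeholders:

settings = {
    'certs': {
        'api.github.com': ['826e66ab04ca5e2d53b94e1c66716e8a', ''],
        '*': ['b969e04a6dcd0a9a0f9bb0b94ddc0e92',
            'https://example.com/extra-ca-bundle.crt']
    }
}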

downloaders/cli_downloader.py

@@ -0,0 +1,81 @@
import os
import subprocess
from ..console_write import console_write
from ..cmd import create_cmd
from .non_clean_exit_error import NonCleanExitError
from .binary_not_found_error import BinaryNotFoundError
class CliDownloader(object):
"""
Base for downloaders that use a command line program
:param settings:
A dict of the various Package Control settings. The Sublime Text
Settings API is not used because this code is run in a thread.
"""
def __init__(self, settings):
self.settings = settings
def clean_tmp_file(self):
if os.path.exists(self.tmp_file):
os.remove(self.tmp_file)
def find_binary(self, name):
"""
Finds the given executable name in the system PATH
:param name:
The exact name of the executable to find
:return:
The absolute path to the executable
:raises:
BinaryNotFoundError when the executable can not be found
"""
dirs = os.environ['PATH'].split(os.pathsep)
if os.name != 'nt':
# This is mostly for OS X, which seems to launch ST with a
            # minimal set of environment variables
dirs.append('/usr/local/bin')
for dir_ in dirs:
path = os.path.join(dir_, name)
if os.path.exists(path):
return path
raise BinaryNotFoundError('The binary %s could not be located' % name)
def execute(self, args):
"""
Runs the executable and args and returns the result
:param args:
A list of the executable path and all arguments to be passed to it
:return:
The text output of the executable
:raises:
NonCleanExitError when the executable exits with an error
"""
if self.settings.get('debug'):
console_write(u"Trying to execute command %s" % create_cmd(args), True)
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.stdout.read()
self.stderr = proc.stderr.read()
returncode = proc.wait()
if returncode != 0:
error = NonCleanExitError(returncode)
error.stderr = self.stderr
error.stdout = output
raise error
return output
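
An illustrative use of the two helpers on a POSIX machine; curl --version is just a stand-in command:

downloader = CliDownloader({'debug': False})
try:
    curl = downloader.find_binary('curl')
    print(downloader.execute([curl, '--version']))
except (BinaryNotFoundError, NonCleanExitError) as e:
    print(e)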

downloaders/curl_downloader.py

@@ -0,0 +1,267 @@
import tempfile
import re
import os
from ..console_write import console_write
from ..open_compat import open_compat, read_compat
from .cli_downloader import CliDownloader
from .non_clean_exit_error import NonCleanExitError
from .rate_limit_exception import RateLimitException
from .downloader_exception import DownloaderException
from .cert_provider import CertProvider
from .limiting_downloader import LimitingDownloader
from .caching_downloader import CachingDownloader
class CurlDownloader(CliDownloader, CertProvider, LimitingDownloader, CachingDownloader):
"""
A downloader that uses the command line program curl
:param settings:
A dict of the various Package Control settings. The Sublime Text
Settings API is not used because this code is run in a thread.
:raises:
BinaryNotFoundError: when curl can not be found
"""
def __init__(self, settings):
self.settings = settings
self.curl = self.find_binary('curl')
def close(self):
"""
No-op for compatibility with UrllibDownloader and WinINetDownloader
"""
pass
def download(self, url, error_message, timeout, tries, prefer_cached=False):
"""
Downloads a URL and returns the contents
:param url:
The URL to download
:param error_message:
A string to include in the console error that is printed
when an error occurs
:param timeout:
The int number of seconds to set the timeout to
:param tries:
The int number of times to try and download the URL in the case of
a timeout or HTTP 503 error
:param prefer_cached:
If a cached version should be returned instead of trying a new request
:raises:
NoCaCertException: when no CA certs can be found for the url
RateLimitException: when a rate limit is hit
DownloaderException: when any other download error occurs
:return:
The string contents of the URL
"""
if prefer_cached:
cached = self.retrieve_cached(url)
if cached:
return cached
self.tmp_file = tempfile.NamedTemporaryFile().name
command = [self.curl, '--user-agent', self.settings.get('user_agent'),
'--connect-timeout', str(int(timeout)), '-sSL',
# Don't be alarmed if the response from the server does not select
# one of these since the server runs a relatively new version of
# OpenSSL which supports compression on the SSL layer, and Apache
# will use that instead of HTTP-level encoding.
'--compressed',
# We have to capture the headers to check for rate limit info
'--dump-header', self.tmp_file]
request_headers = self.add_conditional_headers(url, {})
for name, value in request_headers.items():
command.extend(['--header', "%s: %s" % (name, value)])
secure_url_match = re.match('^https://([^/]+)', url)
        if secure_url_match is not None:
secure_domain = secure_url_match.group(1)
bundle_path = self.check_certs(secure_domain, timeout)
command.extend(['--cacert', bundle_path])
debug = self.settings.get('debug')
if debug:
command.append('-v')
http_proxy = self.settings.get('http_proxy')
https_proxy = self.settings.get('https_proxy')
proxy_username = self.settings.get('proxy_username')
proxy_password = self.settings.get('proxy_password')
if debug:
console_write(u"Curl Debug Proxy", True)
console_write(u" http_proxy: %s" % http_proxy)
console_write(u" https_proxy: %s" % https_proxy)
console_write(u" proxy_username: %s" % proxy_username)
console_write(u" proxy_password: %s" % proxy_password)
if http_proxy or https_proxy:
command.append('--proxy-anyauth')
if proxy_username or proxy_password:
command.extend(['-U', u"%s:%s" % (proxy_username, proxy_password)])
if http_proxy:
os.putenv('http_proxy', http_proxy)
if https_proxy:
os.putenv('HTTPS_PROXY', https_proxy)
command.append(url)
error_string = None
while tries > 0:
tries -= 1
try:
output = self.execute(command)
with open_compat(self.tmp_file, 'r') as f:
headers_str = read_compat(f)
self.clean_tmp_file()
message = 'OK'
status = 200
headers = {}
for header in headers_str.splitlines():
if header[0:5] == 'HTTP/':
message = re.sub('^HTTP/\d\.\d\s+\d+\s*', '', header)
status = int(re.sub('^HTTP/\d\.\d\s+(\d+)(\s+.*)?$', '\\1', header))
continue
if header.strip() == '':
continue
name, value = header.split(':', 1)
headers[name.lower()] = value.strip()
if debug:
self.print_debug(self.stderr.decode('utf-8'))
self.handle_rate_limit(headers, url)
if status not in [200, 304]:
e = NonCleanExitError(22)
e.stderr = "%s %s" % (status, message)
raise e
output = self.cache_result('get', url, status, headers, output)
return output
except (NonCleanExitError) as e:
                # Stderr is used for both the error message and the debug info
                # so we need to process it to extract the debug info
if self.settings.get('debug'):
if hasattr(e.stderr, 'decode'):
e.stderr = e.stderr.decode('utf-8')
e.stderr = self.print_debug(e.stderr)
self.clean_tmp_file()
if e.returncode == 22:
code = re.sub('^.*?(\d+)([\w\s]+)?$', '\\1', e.stderr)
if code == '503' and tries != 0:
# GitHub and BitBucket seem to rate limit via 503
error_string = u'Downloading %s was rate limited' % url
if tries:
error_string += ', trying again'
if debug:
console_write(error_string, True)
continue
download_error = u'HTTP error ' + code
elif e.returncode == 6:
download_error = u'URL error host not found'
elif e.returncode == 28:
# GitHub and BitBucket seem to time out a lot
error_string = u'Downloading %s timed out' % url
if tries:
error_string += ', trying again'
if debug:
console_write(error_string, True)
continue
else:
download_error = e.stderr.rstrip()
error_string = u'%s %s downloading %s.' % (error_message, download_error, url)
break
raise DownloaderException(error_string)
def supports_ssl(self):
"""
Indicates if the object can handle HTTPS requests
:return:
If the object supports HTTPS requests
"""
return True
def print_debug(self, string):
"""
Takes debug output from curl and groups and prints it
:param string:
The complete debug output from curl
:return:
A string containing any stderr output
"""
section = 'General'
last_section = None
output = ''
for line in string.splitlines():
# Placeholder for body of request
if line and line[0:2] == '{ ':
continue
if line and line[0:18] == '} [data not shown]':
continue
if len(line) > 1:
subtract = 0
if line[0:2] == '* ':
section = 'General'
subtract = 2
elif line[0:2] == '> ':
section = 'Write'
subtract = 2
elif line[0:2] == '< ':
section = 'Read'
subtract = 2
line = line[subtract:]
            # If the line does not start with "* ", "< ", "> " or "  "
            # then it is a real stderr message
            if subtract == 0 and line[0:2] != '  ':
output += line.rstrip() + ' '
continue
if line.strip() == '':
continue
if section != last_section:
console_write(u"Curl HTTP Debug %s" % section, True)
console_write(u' ' + line)
last_section = section
return output.rstrip()
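
For a plain HTTP GET with default settings and an empty cache, the command list assembled above boils down to something like the following (user agent, temp file path and URL are illustrative):

command = ['/usr/bin/curl', '--user-agent', 'Package Control',
    '--connect-timeout', '30', '-sSL',
    '--compressed', '--dump-header', '/tmp/tmpXXXXXX',
    'http://example.com/packages.json']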

downloaders/decoding_downloader.py

@@ -0,0 +1,24 @@
import gzip
import zlib
try:
# Python 3
from io import BytesIO as StringIO
except (ImportError):
# Python 2
from StringIO import StringIO
class DecodingDownloader(object):
"""
A base for downloaders that provides the ability to decode gzipped
or deflated content.
"""
def decode_response(self, encoding, response):
if encoding == 'gzip':
return gzip.GzipFile(fileobj=StringIO(response)).read()
elif encoding == 'deflate':
            decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
            return decompressor.decompress(response) + decompressor.flush()
return response
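
A standalone round-trip sketch of decode_response() (Python 3 shown; gzip.compress() requires 3.2+):

import gzip

body = b'{"schema_version": "2.0"}'
downloader = DecodingDownloader()

assert downloader.decode_response('gzip', gzip.compress(body)) == body
# Unknown/absent encodings pass the content through untouched
assert downloader.decode_response(None, body) == body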

downloaders/downloader_exception.py

@@ -0,0 +1,5 @@
class DownloaderException(Exception):
"""If a downloader could not download a URL"""
def __str__(self):
return self.args[0]

downloaders/http_error.py

@@ -0,0 +1,9 @@
class HttpError(Exception):
"""If a downloader was able to download a URL, but the result was not a 200 or 304"""
def __init__(self, message, code):
self.code = code
super(HttpError, self).__init__(message)
def __str__(self):
return self.args[0]

downloaders/limiting_downloader.py

@@ -0,0 +1,36 @@
try:
# Python 3
from urllib.parse import urlparse
except (ImportError):
# Python 2
from urlparse import urlparse
from .rate_limit_exception import RateLimitException
class LimitingDownloader(object):
"""
A base for downloaders that checks for rate limiting headers.
"""
def handle_rate_limit(self, headers, url):
"""
Checks the headers of a response object to make sure we are obeying the
rate limit
:param headers:
The dict-like object that contains lower-cased headers
:param url:
The URL that was requested
:raises:
RateLimitException when the rate limit has been hit
"""
limit_remaining = headers.get('x-ratelimit-remaining', '1')
limit = headers.get('x-ratelimit-limit', '1')
if str(limit_remaining) == '0':
hostname = urlparse(url).hostname
raise RateLimitException(hostname, limit)
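
A sketch of the check against GitHub-style rate limit headers (values illustrative):

headers = {'x-ratelimit-limit': '60', 'x-ratelimit-remaining': '0'}
try:
    LimitingDownloader().handle_rate_limit(headers, 'https://api.github.com/repos/a/b')
except (RateLimitException) as e:
    print(e)    # Rate limit of 60 exceeded for api.github.com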

downloaders/no_ca_cert_exception.py

@@ -0,0 +1,11 @@
from .downloader_exception import DownloaderException
class NoCaCertException(DownloaderException):
"""
An exception for when there is no CA cert for a domain name
"""
def __init__(self, message, domain):
self.domain = domain
super(NoCaCertException, self).__init__(message)

downloaders/non_clean_exit_error.py

@@ -0,0 +1,13 @@
class NonCleanExitError(Exception):
"""
    When a subprocess does not exit cleanly
:param returncode:
The command line integer return code of the subprocess
"""
def __init__(self, returncode):
self.returncode = returncode
def __str__(self):
return repr(self.returncode)

downloaders/non_http_error.py

@@ -0,0 +1,5 @@
class NonHttpError(Exception):
"""If a downloader had a non-clean exit, but it was not due to an HTTP error"""
def __str__(self):
return self.args[0]

downloaders/rate_limit_exception.py

@@ -0,0 +1,13 @@
from .downloader_exception import DownloaderException
class RateLimitException(DownloaderException):
"""
An exception for when the rate limit of an API has been exceeded.
"""
def __init__(self, domain, limit):
self.domain = domain
self.limit = limit
message = u'Rate limit of %s exceeded for %s' % (limit, domain)
super(RateLimitException, self).__init__(message)

downloaders/urllib_downloader.py

@@ -0,0 +1,291 @@
import re
import os
import sys
from .. import http
try:
# Python 3
from http.client import HTTPException, BadStatusLine
from urllib.request import ProxyHandler, HTTPPasswordMgrWithDefaultRealm, ProxyBasicAuthHandler, ProxyDigestAuthHandler, build_opener, Request
from urllib.error import HTTPError, URLError
import urllib.request as urllib_compat
except (ImportError):
# Python 2
from httplib import HTTPException, BadStatusLine
from urllib2 import ProxyHandler, HTTPPasswordMgrWithDefaultRealm, ProxyBasicAuthHandler, ProxyDigestAuthHandler, build_opener, Request
from urllib2 import HTTPError, URLError
import urllib2 as urllib_compat
try:
    # Python 3.3 has a builtin ConnectionError; this import always fails,
    # so the socket.error fallback below (an alias of OSError on Python 3,
    # which ConnectionError subclasses) is what actually gets used
    import ConnectionError
except (ImportError):
    # Python 2.6-3.2
    from socket import error as ConnectionError
from ..console_write import console_write
from ..unicode import unicode_from_os
from ..http.validating_https_handler import ValidatingHTTPSHandler
from ..http.debuggable_http_handler import DebuggableHTTPHandler
from .rate_limit_exception import RateLimitException
from .downloader_exception import DownloaderException
from .cert_provider import CertProvider
from .decoding_downloader import DecodingDownloader
from .limiting_downloader import LimitingDownloader
from .caching_downloader import CachingDownloader
class UrlLibDownloader(CertProvider, DecodingDownloader, LimitingDownloader, CachingDownloader):
"""
A downloader that uses the Python urllib module
:param settings:
A dict of the various Package Control settings. The Sublime Text
Settings API is not used because this code is run in a thread.
"""
def __init__(self, settings):
self.opener = None
self.settings = settings
def close(self):
"""
Closes any persistent/open connections
"""
if not self.opener:
return
handler = self.get_handler()
if handler:
handler.close()
self.opener = None
def download(self, url, error_message, timeout, tries, prefer_cached=False):
"""
Downloads a URL and returns the contents
        Uses the proxy settings from the Package Control.sublime-settings file;
        however, there seem to be a decent number of proxies that this code
        does not work with. Patches welcome!
:param url:
The URL to download
:param error_message:
A string to include in the console error that is printed
when an error occurs
:param timeout:
The int number of seconds to set the timeout to
:param tries:
The int number of times to try and download the URL in the case of
a timeout or HTTP 503 error
:param prefer_cached:
If a cached version should be returned instead of trying a new request
:raises:
NoCaCertException: when no CA certs can be found for the url
RateLimitException: when a rate limit is hit
DownloaderException: when any other download error occurs
:return:
The string contents of the URL
"""
if prefer_cached:
cached = self.retrieve_cached(url)
if cached:
return cached
self.setup_opener(url, timeout)
debug = self.settings.get('debug')
error_string = None
while tries > 0:
tries -= 1
try:
request_headers = {
"User-Agent": self.settings.get('user_agent'),
# Don't be alarmed if the response from the server does not
# select one of these since the server runs a relatively new
# version of OpenSSL which supports compression on the SSL
# layer, and Apache will use that instead of HTTP-level
# encoding.
"Accept-Encoding": "gzip,deflate"
}
request_headers = self.add_conditional_headers(url, request_headers)
request = Request(url, headers=request_headers)
http_file = self.opener.open(request, timeout=timeout)
self.handle_rate_limit(http_file.headers, url)
result = http_file.read()
# Make sure the response is closed so we can re-use the connection
http_file.close()
encoding = http_file.headers.get('content-encoding')
result = self.decode_response(encoding, result)
return self.cache_result('get', url, http_file.getcode(),
http_file.headers, result)
except (HTTPException) as e:
# Since we use keep-alives, it is possible the other end closed
# the connection, and we may just need to re-open
if isinstance(e, BadStatusLine):
handler = self.get_handler()
if handler and handler.use_count > 1:
self.close()
self.setup_opener(url, timeout)
tries += 1
continue
error_string = u'%s HTTP exception %s (%s) downloading %s.' % (
error_message, e.__class__.__name__, unicode_from_os(e), url)
except (HTTPError) as e:
# Make sure the response is closed so we can re-use the connection
e.read()
e.close()
# Make sure we obey Github's rate limiting headers
self.handle_rate_limit(e.headers, url)
# Handle cached responses
if unicode_from_os(e.code) == '304':
return self.cache_result('get', url, int(e.code), e.headers, b'')
# Bitbucket and Github return 503 a decent amount
if unicode_from_os(e.code) == '503' and tries != 0:
error_string = u'Downloading %s was rate limited' % url
if tries:
error_string += ', trying again'
if debug:
console_write(error_string, True)
continue
error_string = u'%s HTTP error %s downloading %s.' % (
error_message, unicode_from_os(e.code), url)
except (URLError) as e:
# Bitbucket and Github timeout a decent amount
if unicode_from_os(e.reason) == 'The read operation timed out' \
or unicode_from_os(e.reason) == 'timed out':
error_string = u'Downloading %s timed out' % url
if tries:
error_string += ', trying again'
if debug:
console_write(error_string, True)
continue
error_string = u'%s URL error %s downloading %s.' % (
error_message, unicode_from_os(e.reason), url)
except (ConnectionError):
# Handle broken pipes/reset connections by creating a new opener, and
# thus getting new handlers and a new connection
error_string = u'Connection went away while trying to download %s, trying again' % url
if debug:
console_write(error_string, True)
self.opener = None
self.setup_opener(url, timeout)
tries += 1
continue
break
raise DownloaderException(error_string)
def get_handler(self):
"""
Get the HTTPHandler object for the current connection
"""
if not self.opener:
return None
for handler in self.opener.handlers:
if isinstance(handler, ValidatingHTTPSHandler) or isinstance(handler, DebuggableHTTPHandler):
return handler
def setup_opener(self, url, timeout):
"""
        Sets up a urllib OpenerDirector to be used for requests. There is a
        fair amount of custom urllib code in Package Control, part of which
        handles proxies and keep-alives. The opener is created this way
        because the handlers have been customized to send the
        "Connection: Keep-Alive" header and to hold onto connections so
        they can be re-used.
:param url:
The URL to download
:param timeout:
The int number of seconds to set the timeout to
"""
if not self.opener:
http_proxy = self.settings.get('http_proxy')
https_proxy = self.settings.get('https_proxy')
if http_proxy or https_proxy:
proxies = {}
if http_proxy:
proxies['http'] = http_proxy
if https_proxy:
proxies['https'] = https_proxy
proxy_handler = ProxyHandler(proxies)
else:
proxy_handler = ProxyHandler()
password_manager = HTTPPasswordMgrWithDefaultRealm()
proxy_username = self.settings.get('proxy_username')
proxy_password = self.settings.get('proxy_password')
if proxy_username and proxy_password:
if http_proxy:
password_manager.add_password(None, http_proxy, proxy_username,
proxy_password)
if https_proxy:
password_manager.add_password(None, https_proxy, proxy_username,
proxy_password)
handlers = [proxy_handler]
basic_auth_handler = ProxyBasicAuthHandler(password_manager)
digest_auth_handler = ProxyDigestAuthHandler(password_manager)
handlers.extend([digest_auth_handler, basic_auth_handler])
debug = self.settings.get('debug')
if debug:
console_write(u"Urllib Debug Proxy", True)
console_write(u" http_proxy: %s" % http_proxy)
console_write(u" https_proxy: %s" % https_proxy)
console_write(u" proxy_username: %s" % proxy_username)
console_write(u" proxy_password: %s" % proxy_password)
secure_url_match = re.match('^https://([^/]+)', url)
            if secure_url_match is not None:
secure_domain = secure_url_match.group(1)
bundle_path = self.check_certs(secure_domain, timeout)
bundle_path = bundle_path.encode(sys.getfilesystemencoding())
handlers.append(ValidatingHTTPSHandler(ca_certs=bundle_path,
debug=debug, passwd=password_manager,
user_agent=self.settings.get('user_agent')))
else:
handlers.append(DebuggableHTTPHandler(debug=debug,
passwd=password_manager))
self.opener = build_opener(*handlers)
def supports_ssl(self):
"""
Indicates if the object can handle HTTPS requests
:return:
If the object supports HTTPS requests
"""
return 'ssl' in sys.modules and hasattr(urllib_compat, 'HTTPSHandler')
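
The call shape shared by every downloader in this package, shown here for UrlLibDownloader; the settings values are illustrative and caching is disabled for the sketch:

settings = {
    'debug': False,
    'timeout': 30,
    'user_agent': 'Package Control',
    'http_proxy': None,
    'https_proxy': None,
    'proxy_username': None,
    'proxy_password': None,
    'cache': None,
    'cache_length': 300
}

downloader = UrlLibDownloader(settings)
try:
    content = downloader.download('http://example.com/packages.json',
        'Error downloading channel.', settings['timeout'], tries=3)
finally:
    downloader.close()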

downloaders/wget_downloader.py

@@ -0,0 +1,347 @@
import tempfile
import re
import os
from ..console_write import console_write
from ..unicode import unicode_from_os
from ..open_compat import open_compat, read_compat
from .cli_downloader import CliDownloader
from .non_http_error import NonHttpError
from .non_clean_exit_error import NonCleanExitError
from .rate_limit_exception import RateLimitException
from .downloader_exception import DownloaderException
from .cert_provider import CertProvider
from .decoding_downloader import DecodingDownloader
from .limiting_downloader import LimitingDownloader
from .caching_downloader import CachingDownloader
class WgetDownloader(CliDownloader, CertProvider, DecodingDownloader, LimitingDownloader, CachingDownloader):
"""
A downloader that uses the command line program wget
:param settings:
A dict of the various Package Control settings. The Sublime Text
Settings API is not used because this code is run in a thread.
:raises:
BinaryNotFoundError: when wget can not be found
"""
def __init__(self, settings):
self.settings = settings
self.debug = settings.get('debug')
self.wget = self.find_binary('wget')
def close(self):
"""
No-op for compatibility with UrllibDownloader and WinINetDownloader
"""
pass
def download(self, url, error_message, timeout, tries, prefer_cached=False):
"""
Downloads a URL and returns the contents
:param url:
The URL to download
:param error_message:
A string to include in the console error that is printed
when an error occurs
:param timeout:
The int number of seconds to set the timeout to
:param tries:
The int number of times to try and download the URL in the case of
a timeout or HTTP 503 error
:param prefer_cached:
If a cached version should be returned instead of trying a new request
:raises:
NoCaCertException: when no CA certs can be found for the url
RateLimitException: when a rate limit is hit
DownloaderException: when any other download error occurs
:return:
The string contents of the URL
"""
if prefer_cached:
cached = self.retrieve_cached(url)
if cached:
return cached
self.tmp_file = tempfile.NamedTemporaryFile().name
command = [self.wget, '--connect-timeout=' + str(int(timeout)), '-o',
self.tmp_file, '-O', '-', '-U', self.settings.get('user_agent')]
request_headers = {
# Don't be alarmed if the response from the server does not select
# one of these since the server runs a relatively new version of
# OpenSSL which supports compression on the SSL layer, and Apache
# will use that instead of HTTP-level encoding.
'Accept-Encoding': 'gzip,deflate'
}
request_headers = self.add_conditional_headers(url, request_headers)
for name, value in request_headers.items():
command.extend(['--header', "%s: %s" % (name, value)])
secure_url_match = re.match('^https://([^/]+)', url)
        if secure_url_match is not None:
secure_domain = secure_url_match.group(1)
bundle_path = self.check_certs(secure_domain, timeout)
command.append(u'--ca-certificate=' + bundle_path)
if self.debug:
command.append('-d')
else:
command.append('-S')
http_proxy = self.settings.get('http_proxy')
https_proxy = self.settings.get('https_proxy')
proxy_username = self.settings.get('proxy_username')
proxy_password = self.settings.get('proxy_password')
if proxy_username:
command.append(u"--proxy-user=%s" % proxy_username)
if proxy_password:
command.append(u"--proxy-password=%s" % proxy_password)
if self.debug:
console_write(u"Wget Debug Proxy", True)
console_write(u" http_proxy: %s" % http_proxy)
console_write(u" https_proxy: %s" % https_proxy)
console_write(u" proxy_username: %s" % proxy_username)
console_write(u" proxy_password: %s" % proxy_password)
command.append(url)
if http_proxy:
os.putenv('http_proxy', http_proxy)
if https_proxy:
os.putenv('https_proxy', https_proxy)
error_string = None
while tries > 0:
tries -= 1
try:
result = self.execute(command)
general, headers = self.parse_output()
encoding = headers.get('content-encoding')
if encoding:
result = self.decode_response(encoding, result)
result = self.cache_result('get', url, general['status'],
headers, result)
return result
except (NonCleanExitError) as e:
try:
general, headers = self.parse_output()
self.handle_rate_limit(headers, url)
if general['status'] == 304:
return self.cache_result('get', url, general['status'],
headers, None)
if general['status'] == 503 and tries != 0:
# GitHub and BitBucket seem to rate limit via 503
error_string = u'Downloading %s was rate limited' % url
if tries:
error_string += ', trying again'
if self.debug:
console_write(error_string, True)
continue
download_error = 'HTTP error %s' % general['status']
except (NonHttpError) as e:
download_error = unicode_from_os(e)
# GitHub and BitBucket seem to time out a lot
if download_error.find('timed out') != -1:
error_string = u'Downloading %s timed out' % url
if tries:
error_string += ', trying again'
if self.debug:
console_write(error_string, True)
continue
error_string = u'%s %s downloading %s.' % (error_message, download_error, url)
break
raise DownloaderException(error_string)
def supports_ssl(self):
"""
Indicates if the object can handle HTTPS requests
:return:
If the object supports HTTPS requests
"""
return True
def parse_output(self):
"""
Parses the wget output file, prints debug information and returns headers
:return:
A tuple of (general, headers) where general is a dict with the keys:
`version` - HTTP version number (string)
`status` - HTTP status code (integer)
`message` - HTTP status message (string)
        And headers is a dict with the keys being lower-case versions of the
HTTP header names.
"""
with open_compat(self.tmp_file, 'r') as f:
output = read_compat(f).splitlines()
self.clean_tmp_file()
error = None
header_lines = []
if self.debug:
section = 'General'
last_section = None
for line in output:
if section == 'General':
if self.skippable_line(line):
continue
# Skip blank lines
if line.strip() == '':
continue
# Error lines
if line[0:5] == 'wget:':
error = line[5:].strip()
if line[0:7] == 'failed:':
error = line[7:].strip()
if line == '---request begin---':
section = 'Write'
continue
elif line == '---request end---':
section = 'General'
continue
elif line == '---response begin---':
section = 'Read'
continue
elif line == '---response end---':
section = 'General'
continue
if section != last_section:
console_write(u"Wget HTTP Debug %s" % section, True)
if section == 'Read':
header_lines.append(line)
console_write(u' ' + line)
last_section = section
else:
for line in output:
if self.skippable_line(line):
continue
# Check the resolving and connecting to lines for errors
if re.match('(Resolving |Connecting to )', line):
failed_match = re.search(' failed: (.*)$', line)
if failed_match:
error = failed_match.group(1).strip()
# Error lines
if line[0:5] == 'wget:':
error = line[5:].strip()
if line[0:7] == 'failed:':
error = line[7:].strip()
if line[0:2] == ' ':
header_lines.append(line.lstrip())
if error:
raise NonHttpError(error)
return self.parse_headers(header_lines)
def skippable_line(self, line):
"""
Determines if a debug line is skippable - usually because of extraneous
or duplicate information.
:param line:
The debug line to check
:return:
True if the line is skippable, otherwise None
"""
# Skip date lines
if re.match('--\d{4}-\d{2}-\d{2}', line):
return True
if re.match('\d{4}-\d{2}-\d{2}', line):
return True
# Skip HTTP status code lines since we already have that info
if re.match('\d{3} ', line):
return True
# Skip Saving to and progress lines
if re.match('(Saving to:|\s*\d+K)', line):
return True
# Skip notice about ignoring body on HTTP error
if re.match('Skipping \d+ byte', line):
return True
def parse_headers(self, output=None):
"""
Parses HTTP headers into two dict objects
:param output:
An array of header lines, if None, loads from temp output file
:return:
A tuple of (general, headers) where general is a dict with the keys:
`version` - HTTP version number (string)
`status` - HTTP status code (integer)
`message` - HTTP status message (string)
        And headers is a dict with the keys being lower-case versions of the
HTTP header names.
"""
if not output:
with open_compat(self.tmp_file, 'r') as f:
output = read_compat(f).splitlines()
self.clean_tmp_file()
general = {
'version': '0.9',
'status': 200,
'message': 'OK'
}
headers = {}
for line in output:
# When using the -S option, headers have two spaces before them,
# additionally, valid headers won't have spaces, so this is always
# a safe operation to perform
line = line.lstrip()
if line.find('HTTP/') == 0:
match = re.match('HTTP/(\d\.\d)\s+(\d+)(?:\s+(.*))?$', line)
general['version'] = match.group(1)
general['status'] = int(match.group(2))
general['message'] = match.group(3) or ''
else:
name, value = line.split(':', 1)
headers[name.lower()] = value.strip()
return (general, headers)
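
A sketch of what parse_headers() yields for the header lines wget -S emits (Python 3 shown; the method is called unbound since self is untouched when output is supplied):

lines = [
    'HTTP/1.1 200 OK',
    'Content-Type: application/json; charset=utf-8',
    'Content-Encoding: gzip'
]
general, headers = WgetDownloader.parse_headers(None, lines)
# general -> {'version': '1.1', 'status': 200, 'message': 'OK'}
# headers -> {'content-type': 'application/json; charset=utf-8',
#             'content-encoding': 'gzip'}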

downloaders/wininet_downloader.py

@@ -0,0 +1,652 @@
from ctypes import windll, wintypes
import ctypes
import time
import re
import datetime
import struct
import locale
wininet = windll.wininet
try:
# Python 3
from urllib.parse import urlparse
except (ImportError):
# Python 2
from urlparse import urlparse
from ..console_write import console_write
from ..unicode import unicode_from_os
from .non_http_error import NonHttpError
from .http_error import HttpError
from .rate_limit_exception import RateLimitException
from .downloader_exception import DownloaderException
from .decoding_downloader import DecodingDownloader
from .limiting_downloader import LimitingDownloader
from .caching_downloader import CachingDownloader
class WinINetDownloader(DecodingDownloader, LimitingDownloader, CachingDownloader):
"""
A downloader that uses the Windows WinINet DLL to perform downloads. This
has the benefit of utilizing system-level proxy configuration and CA certs.
:param settings:
A dict of the various Package Control settings. The Sublime Text
Settings API is not used because this code is run in a thread.
"""
# General constants
ERROR_INSUFFICIENT_BUFFER = 122
# InternetOpen constants
INTERNET_OPEN_TYPE_PRECONFIG = 0
# InternetConnect constants
INTERNET_SERVICE_HTTP = 3
INTERNET_FLAG_EXISTING_CONNECT = 0x20000000
INTERNET_FLAG_IGNORE_REDIRECT_TO_HTTPS = 0x00004000
# InternetSetOption constants
INTERNET_OPTION_CONNECT_TIMEOUT = 2
INTERNET_OPTION_SEND_TIMEOUT = 5
INTERNET_OPTION_RECEIVE_TIMEOUT = 6
# InternetQueryOption constants
INTERNET_OPTION_SECURITY_CERTIFICATE_STRUCT = 32
INTERNET_OPTION_PROXY = 38
INTERNET_OPTION_PROXY_USERNAME = 43
INTERNET_OPTION_PROXY_PASSWORD = 44
INTERNET_OPTION_CONNECTED_STATE = 50
# HttpOpenRequest constants
INTERNET_FLAG_KEEP_CONNECTION = 0x00400000
INTERNET_FLAG_RELOAD = 0x80000000
INTERNET_FLAG_NO_CACHE_WRITE = 0x04000000
INTERNET_FLAG_PRAGMA_NOCACHE = 0x00000100
INTERNET_FLAG_SECURE = 0x00800000
# HttpQueryInfo constants
HTTP_QUERY_RAW_HEADERS_CRLF = 22
# InternetConnectedState constants
INTERNET_STATE_CONNECTED = 1
INTERNET_STATE_DISCONNECTED = 2
INTERNET_STATE_DISCONNECTED_BY_USER = 0x10
INTERNET_STATE_IDLE = 0x100
INTERNET_STATE_BUSY = 0x200
def __init__(self, settings):
self.settings = settings
self.debug = settings.get('debug')
self.network_connection = None
self.tcp_connection = None
self.use_count = 0
self.hostname = None
self.port = None
self.scheme = None
self.was_offline = None
def close(self):
"""
Closes any persistent/open connections
"""
closed = False
changed_state_back = False
if self.tcp_connection:
wininet.InternetCloseHandle(self.tcp_connection)
self.tcp_connection = None
closed = True
if self.network_connection:
wininet.InternetCloseHandle(self.network_connection)
self.network_connection = None
closed = True
if self.was_offline:
dw_connected_state = wintypes.DWORD(self.INTERNET_STATE_DISCONNECTED_BY_USER)
dw_flags = wintypes.DWORD(0)
connected_info = InternetConnectedInfo(dw_connected_state, dw_flags)
wininet.InternetSetOptionA(None,
self.INTERNET_OPTION_CONNECTED_STATE, ctypes.byref(connected_info), ctypes.sizeof(connected_info))
changed_state_back = True
if self.debug:
s = '' if self.use_count == 1 else 's'
console_write(u"WinINet %s Debug General" % self.scheme.upper(), True)
console_write(u" Closing connection to %s on port %s after %s request%s" % (
self.hostname, self.port, self.use_count, s))
if changed_state_back:
console_write(u" Changed Internet Explorer back to Work Offline")
self.hostname = None
self.port = None
self.scheme = None
self.use_count = 0
self.was_offline = None
def download(self, url, error_message, timeout, tries, prefer_cached=False):
"""
Downloads a URL and returns the contents
:param url:
The URL to download
:param error_message:
A string to include in the console error that is printed
when an error occurs
:param timeout:
The int number of seconds to set the timeout to
:param tries:
The int number of times to try and download the URL in the case of
a timeout or HTTP 503 error
:param prefer_cached:
If a cached version should be returned instead of trying a new request
:raises:
RateLimitException: when a rate limit is hit
DownloaderException: when any other download error occurs
:return:
The string contents of the URL
"""
if prefer_cached:
cached = self.retrieve_cached(url)
if cached:
return cached
url_info = urlparse(url)
if not url_info.port:
port = 443 if url_info.scheme == 'https' else 80
hostname = url_info.netloc
else:
port = url_info.port
hostname = url_info.hostname
path = url_info.path
if url_info.params:
path += ';' + url_info.params
if url_info.query:
path += '?' + url_info.query
request_headers = {
'Accept-Encoding': 'gzip,deflate'
}
request_headers = self.add_conditional_headers(url, request_headers)
created_connection = False
# If we switched Internet Explorer out of "Work Offline" mode
changed_to_online = False
# If the user is requesting a connection to another server, close the connection
if (self.hostname and self.hostname != hostname) or (self.port and self.port != port):
self.close()
# Reset the error info to a known clean state
ctypes.windll.kernel32.SetLastError(0)
# Save the internet setup in the class for re-use
if not self.tcp_connection:
created_connection = True
# Connect to the internet if necessary
state = self.read_option(None, self.INTERNET_OPTION_CONNECTED_STATE)
state = ord(state)
if state & self.INTERNET_STATE_DISCONNECTED or state & self.INTERNET_STATE_DISCONNECTED_BY_USER:
# Track the previous state so we can go back once complete
self.was_offline = True
dw_connected_state = wintypes.DWORD(self.INTERNET_STATE_CONNECTED)
dw_flags = wintypes.DWORD(0)
connected_info = InternetConnectedInfo(dw_connected_state, dw_flags)
wininet.InternetSetOptionA(None,
self.INTERNET_OPTION_CONNECTED_STATE, ctypes.byref(connected_info), ctypes.sizeof(connected_info))
changed_to_online = True
self.network_connection = wininet.InternetOpenW(self.settings.get('user_agent'),
self.INTERNET_OPEN_TYPE_PRECONFIG, None, None, 0)
if not self.network_connection:
error_string = u'%s %s during network phase of downloading %s.' % (error_message, self.extract_error(), url)
raise DownloaderException(error_string)
win_timeout = wintypes.DWORD(int(timeout) * 1000)
            # Apparently INTERNET_OPTION_CONNECT_TIMEOUT just doesn't work;
            # leaving it in, hoping it may be fixed in the future
wininet.InternetSetOptionA(self.network_connection,
self.INTERNET_OPTION_CONNECT_TIMEOUT, win_timeout, ctypes.sizeof(win_timeout))
wininet.InternetSetOptionA(self.network_connection,
self.INTERNET_OPTION_SEND_TIMEOUT, win_timeout, ctypes.sizeof(win_timeout))
wininet.InternetSetOptionA(self.network_connection,
self.INTERNET_OPTION_RECEIVE_TIMEOUT, win_timeout, ctypes.sizeof(win_timeout))
# Don't allow HTTPS sites to redirect to HTTP sites
tcp_flags = self.INTERNET_FLAG_IGNORE_REDIRECT_TO_HTTPS
# Try to re-use an existing connection to the server
tcp_flags |= self.INTERNET_FLAG_EXISTING_CONNECT
self.tcp_connection = wininet.InternetConnectW(self.network_connection,
hostname, port, None, None, self.INTERNET_SERVICE_HTTP, tcp_flags, 0)
if not self.tcp_connection:
error_string = u'%s %s during connection phase of downloading %s.' % (error_message, self.extract_error(), url)
raise DownloaderException(error_string)
# Normally the proxy info would come from IE, but this allows storing it in
# the Package Control settings file.
proxy_username = self.settings.get('proxy_username')
proxy_password = self.settings.get('proxy_password')
if proxy_username and proxy_password:
username = ctypes.c_wchar_p(proxy_username)
password = ctypes.c_wchar_p(proxy_password)
wininet.InternetSetOptionW(self.tcp_connection,
self.INTERNET_OPTION_PROXY_USERNAME, ctypes.cast(username, ctypes.c_void_p), len(proxy_username))
wininet.InternetSetOptionW(self.tcp_connection,
self.INTERNET_OPTION_PROXY_PASSWORD, ctypes.cast(password, ctypes.c_void_p), len(proxy_password))
self.hostname = hostname
self.port = port
self.scheme = url_info.scheme
else:
if self.debug:
console_write(u"WinINet %s Debug General" % self.scheme.upper(), True)
console_write(u" Re-using connection to %s on port %s for request #%s" % (
self.hostname, self.port, self.use_count))
error_string = None
while tries > 0:
tries -= 1
try:
http_connection = None
# Keep-alive for better performance
http_flags = self.INTERNET_FLAG_KEEP_CONNECTION
# Prevent caching/retrieving from cache
http_flags |= self.INTERNET_FLAG_RELOAD
http_flags |= self.INTERNET_FLAG_NO_CACHE_WRITE
http_flags |= self.INTERNET_FLAG_PRAGMA_NOCACHE
# Use SSL
if self.scheme == 'https':
http_flags |= self.INTERNET_FLAG_SECURE
http_connection = wininet.HttpOpenRequestW(self.tcp_connection, u'GET', path, u'HTTP/1.1', None, None, http_flags, 0)
if not http_connection:
error_string = u'%s %s during HTTP connection phase of downloading %s.' % (error_message, self.extract_error(), url)
raise DownloaderException(error_string)
request_header_lines = []
for header, value in request_headers.items():
request_header_lines.append(u"%s: %s" % (header, value))
request_header_lines = u"\r\n".join(request_header_lines)
success = wininet.HttpSendRequestW(http_connection, request_header_lines, len(request_header_lines), None, 0)
if not success:
error_string = u'%s %s during HTTP write phase of downloading %s.' % (error_message, self.extract_error(), url)
raise DownloaderException(error_string)
# If we try to query before here, the proxy info will not be available to the first request
if self.debug:
proxy_struct = self.read_option(self.network_connection, self.INTERNET_OPTION_PROXY)
proxy = ''
if proxy_struct.lpszProxy:
proxy = proxy_struct.lpszProxy.decode('cp1252')
proxy_bypass = ''
if proxy_struct.lpszProxyBypass:
proxy_bypass = proxy_struct.lpszProxyBypass.decode('cp1252')
proxy_username = self.read_option(self.tcp_connection, self.INTERNET_OPTION_PROXY_USERNAME)
proxy_password = self.read_option(self.tcp_connection, self.INTERNET_OPTION_PROXY_PASSWORD)
console_write(u"WinINet Debug Proxy", True)
console_write(u" proxy: %s" % proxy)
console_write(u" proxy bypass: %s" % proxy_bypass)
console_write(u" proxy username: %s" % proxy_username)
console_write(u" proxy password: %s" % proxy_password)
self.use_count += 1
if self.debug and created_connection:
if self.scheme == 'https':
cert_struct = self.read_option(http_connection, self.INTERNET_OPTION_SECURITY_CERTIFICATE_STRUCT)
if cert_struct.lpszIssuerInfo:
issuer_info = cert_struct.lpszIssuerInfo.decode('cp1252')
issuer_parts = issuer_info.split("\r\n")
else:
issuer_parts = ['No issuer info']
if cert_struct.lpszSubjectInfo:
subject_info = cert_struct.lpszSubjectInfo.decode('cp1252')
subject_parts = subject_info.split("\r\n")
else:
subject_parts = ["No subject info"]
common_name = subject_parts[-1]
if cert_struct.ftStart.dwLowDateTime != 0 and cert_struct.ftStart.dwHighDateTime != 0:
issue_date = self.convert_filetime_to_datetime(cert_struct.ftStart)
issue_date = issue_date.strftime('%a, %d %b %Y %H:%M:%S GMT')
else:
issue_date = u"No issue date"
if cert_struct.ftExpiry.dwLowDateTime != 0 and cert_struct.ftExpiry.dwHighDateTime != 0:
expiration_date = self.convert_filetime_to_datetime(cert_struct.ftExpiry)
expiration_date = expiration_date.strftime('%a, %d %b %Y %H:%M:%S GMT')
else:
expiration_date = u"No expiration date"
console_write(u"WinINet HTTPS Debug General", True)
if changed_to_online:
console_write(u" Internet Explorer was set to Work Offline, temporarily going online")
console_write(u" Server SSL Certificate:")
console_write(u" subject: %s" % ", ".join(subject_parts))
console_write(u" issuer: %s" % ", ".join(issuer_parts))
console_write(u" common name: %s" % common_name)
console_write(u" issue date: %s" % issue_date)
console_write(u" expire date: %s" % expiration_date)
elif changed_to_online:
console_write(u"WinINet HTTP Debug General", True)
console_write(u" Internet Explorer was set to Work Offline, temporarily going online")
if self.debug:
console_write(u"WinINet %s Debug Write" % self.scheme.upper(), True)
# Add in some known headers that WinINet sends since we can't get the real list
console_write(u" GET %s HTTP/1.1" % path)
for header, value in request_headers.items():
console_write(u" %s: %s" % (header, value))
console_write(u" User-Agent: %s" % self.settings.get('user_agent'))
console_write(u" Host: %s" % hostname)
console_write(u" Connection: Keep-Alive")
console_write(u" Cache-Control: no-cache")
header_buffer_size = 8192
try_again = True
while try_again:
try_again = False
to_read_was_read = wintypes.DWORD(header_buffer_size)
headers_buffer = ctypes.create_string_buffer(header_buffer_size)
success = wininet.HttpQueryInfoA(http_connection, self.HTTP_QUERY_RAW_HEADERS_CRLF, ctypes.byref(headers_buffer), ctypes.byref(to_read_was_read), None)
if not success:
if ctypes.GetLastError() != self.ERROR_INSUFFICIENT_BUFFER:
error_string = u'%s %s during header read phase of downloading %s.' % (error_message, self.extract_error(), url)
raise DownloaderException(error_string)
# The error was a buffer that was too small, so try again
header_buffer_size = to_read_was_read.value
try_again = True
continue
headers = b''
if to_read_was_read.value > 0:
headers += headers_buffer.raw[:to_read_was_read.value]
headers = headers.decode('iso-8859-1').rstrip("\r\n").split("\r\n")
if self.debug:
console_write(u"WinINet %s Debug Read" % self.scheme.upper(), True)
for header in headers:
console_write(u" %s" % header)
buffer_length = 65536
output_buffer = ctypes.create_string_buffer(buffer_length)
bytes_read = wintypes.DWORD()
result = b''
try_again = True
while try_again:
try_again = False
wininet.InternetReadFile(http_connection, output_buffer, buffer_length, ctypes.byref(bytes_read))
if bytes_read.value > 0:
result += output_buffer.raw[:bytes_read.value]
try_again = True
general, headers = self.parse_headers(headers)
self.handle_rate_limit(headers, url)
if general['status'] == 503 and tries != 0:
# GitHub and BitBucket seem to rate limit via 503
error_string = u'Downloading %s was rate limited' % url
if tries:
error_string += ', trying again'
if self.debug:
console_write(error_string, True)
continue
encoding = headers.get('content-encoding')
if encoding:
result = self.decode_response(encoding, result)
result = self.cache_result('get', url, general['status'],
headers, result)
if general['status'] not in [200, 304]:
raise HttpError("HTTP error %s" % general['status'], general['status'])
return result
except (NonHttpError, HttpError) as e:
# GitHub and BitBucket seem to time out a lot
if str(e).find('timed out') != -1:
error_string = u'Downloading %s timed out' % url
if tries:
error_string += ', trying again'
if self.debug:
console_write(error_string, True)
continue
error_string = u'%s %s downloading %s.' % (error_message, e, url)
finally:
if http_connection:
wininet.InternetCloseHandle(http_connection)
break
raise DownloaderException(error_string)
def convert_filetime_to_datetime(self, filetime):
"""
Windows returns times as 64-bit unsigned longs that are the number
of hundreds of nanoseconds since Jan 1 1601. This converts it to
a datetime object.
:param filetime:
A FileTime struct object
:return:
A (UTC) datetime object
"""
        hundreds_nano_seconds = struct.unpack('>Q', struct.pack('>LL', filetime.dwHighDateTime, filetime.dwLowDateTime))[0]
        seconds_since_1601 = hundreds_nano_seconds / 10000000
        epoch_seconds = seconds_since_1601 - 11644473600  # Seconds from Jan 1 1601 to Jan 1 1970
        # Use utcfromtimestamp(), not fromtimestamp(): FILETIME values are
        # UTC-based and callers format the result with a GMT suffix
        return datetime.datetime.utcfromtimestamp(epoch_seconds)
def extract_error(self):
"""
Retrieves and formats an error from WinINet
:return:
A string with a nice description of the error
"""
error_num = ctypes.GetLastError()
raw_error_string = ctypes.FormatError(error_num)
error_string = unicode_from_os(raw_error_string)
# Try to fill in some known errors
if error_string == u"<no description>":
error_lookup = {
12007: u'host not found',
12029: u'connection refused',
12057: u'error checking for server certificate revocation',
12169: u'invalid secure certificate',
12157: u'secure channel error, server not providing SSL',
12002: u'operation timed out'
}
if error_num in error_lookup:
error_string = error_lookup[error_num]
if error_string == u"<no description>":
return u"(errno %s)" % error_num
error_string = error_string[0].upper() + error_string[1:]
return u"%s (errno %s)" % (error_string, error_num)
def supports_ssl(self):
"""
Indicates if the object can handle HTTPS requests
:return:
If the object supports HTTPS requests
"""
return True
def read_option(self, handle, option):
"""
Reads information about the internet connection, which may be a string or struct
:param handle:
The handle to query for the info
:param option:
The (int) option to get
:return:
A string, or one of the InternetCertificateInfo or InternetProxyInfo structs
"""
option_buffer_size = 8192
try_again = True
while try_again:
try_again = False
to_read_was_read = wintypes.DWORD(option_buffer_size)
option_buffer = ctypes.create_string_buffer(option_buffer_size)
ref = ctypes.byref(option_buffer)
success = wininet.InternetQueryOptionA(handle, option, ref, ctypes.byref(to_read_was_read))
if not success:
if ctypes.GetLastError() != self.ERROR_INSUFFICIENT_BUFFER:
raise NonHttpError(self.extract_error())
# The error was a buffer that was too small, so try again
option_buffer_size = to_read_was_read.value
try_again = True
continue
if option == self.INTERNET_OPTION_SECURITY_CERTIFICATE_STRUCT:
length = min(len(option_buffer), ctypes.sizeof(InternetCertificateInfo))
cert_info = InternetCertificateInfo()
ctypes.memmove(ctypes.addressof(cert_info), option_buffer, length)
return cert_info
elif option == self.INTERNET_OPTION_PROXY:
length = min(len(option_buffer), ctypes.sizeof(InternetProxyInfo))
proxy_info = InternetProxyInfo()
ctypes.memmove(ctypes.addressof(proxy_info), option_buffer, length)
return proxy_info
else:
option = b''
if to_read_was_read.value > 0:
option += option_buffer.raw[:to_read_was_read.value]
return option.decode('cp1252').rstrip("\x00")
def parse_headers(self, output):
"""
Parses HTTP headers into two dict objects
:param output:
An array of header lines
:return:
A tuple of (general, headers) where general is a dict with the keys:
`version` - HTTP version number (string)
`status` - HTTP status code (integer)
`message` - HTTP status message (string)
        And headers is a dict with the keys being lower-case versions of the
HTTP header names.
"""
general = {
'version': '0.9',
'status': 200,
'message': 'OK'
}
headers = {}
for line in output:
line = line.lstrip()
if line.find('HTTP/') == 0:
match = re.match('HTTP/(\d\.\d)\s+(\d+)\s+(.*)$', line)
general['version'] = match.group(1)
general['status'] = int(match.group(2))
general['message'] = match.group(3)
else:
name, value = line.split(':', 1)
headers[name.lower()] = value.strip()
return (general, headers)
class FileTime(ctypes.Structure):
"""
A Windows struct used by InternetCertificateInfo for certificate
date information
"""
_fields_ = [
("dwLowDateTime", wintypes.DWORD),
("dwHighDateTime", wintypes.DWORD)
]
class InternetCertificateInfo(ctypes.Structure):
"""
A Windows struct used to store information about an SSL certificate
"""
_fields_ = [
("ftExpiry", FileTime),
("ftStart", FileTime),
("lpszSubjectInfo", ctypes.c_char_p),
("lpszIssuerInfo", ctypes.c_char_p),
("lpszProtocolName", ctypes.c_char_p),
("lpszSignatureAlgName", ctypes.c_char_p),
("lpszEncryptionAlgName", ctypes.c_char_p),
("dwKeySize", wintypes.DWORD)
]
class InternetProxyInfo(ctypes.Structure):
"""
    A Windows struct used to store information about the configured proxy server
"""
_fields_ = [
("dwAccessType", wintypes.DWORD),
("lpszProxy", ctypes.c_char_p),
("lpszProxyBypass", ctypes.c_char_p)
]
class InternetConnectedInfo(ctypes.Structure):
"""
    A Windows struct used to store information about the global internet connection state
"""
_fields_ = [
("dwConnectedState", wintypes.DWORD),
("dwFlags", wintypes.DWORD)
]
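
A worked example for convert_filetime_to_datetime(): the Unix epoch falls 11644473600 seconds, i.e. 116444736000000000 hundred-nanosecond intervals, after Jan 1 1601, which splits into the two DWORDs below (Python 3 shown; the method is called unbound since it does not touch self):

ft = FileTime(3577643008, 27111902)    # dwLowDateTime, dwHighDateTime
print(WinINetDownloader.convert_filetime_to_datetime(None, ft))
# -> 1970-01-01 00:00:00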