Downloading was broken in certain cases on Python 2.

FancyURLopener.urlretrieve was failing on redirect, but in an insidious way, because of a bug in the python-future module:
https://github.com/PythonCharmers/python-future/issues/425
Replace with the requests module for the time being.
Kjell Wooding 2019-01-09 15:48:41 -05:00
parent 1d85617b76
commit 999dc68123
2 changed files with 12 additions and 18 deletions
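
In essence, the patch below drops the FancyURLopener subclass and fetches files with requests, which follows HTTP redirects by default. A minimal sketch of the idea, assuming a standalone helper (the download() name and the raise_for_status() call are illustrative and not part of the actual patch):

    import requests

    # User-agent string carried over from the removed ChromeDownloader class,
    # presumably to avoid servers that reject the default Python user agent.
    HEADERS = {'User-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
               'AppleWebKit/537.36 (KHTML, like Gecko) '
               'Chrome/28.0.1500.71 Safari/537.36'}

    def download(url, filename):
        # requests.get() follows 3xx redirects out of the box
        # (allow_redirects=True), the case that the future-patched
        # FancyURLopener.urlretrieve mishandled.
        r = requests.get(url, headers=HEADERS)
        r.raise_for_status()  # illustrative; the patch itself does not check the status
        with open(filename, 'wb') as fw:
            fw.write(r.content)
        return filename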

requirements.txt

@@ -1 +1,2 @@
 pbxproj==2.5.1
+requests>=2.13

toolchain.py

@@ -20,14 +20,11 @@ import fnmatch
 import tempfile
 from datetime import datetime
 try:
-    from urllib.request import FancyURLopener, urlcleanup
-except ImportError:
-    from urllib import FancyURLopener, urlcleanup
-try:
+    import requests
     from pbxproj import XcodeProject
     from pbxproj.pbxextensions.ProjectFiles import FileOptions
 except ImportError:
-    print("ERROR: pbxproj requirements is missing")
+    print("ERROR: Python requirements are missing")
     print("To install: pip install -r requirements.txt")
     sys.exit(0)
 curdir = dirname(__file__)
@@ -65,15 +62,6 @@ def cache_execution(f):
         state[key_time] = str(datetime.utcnow())
     return _cache_execution
 
-
-class ChromeDownloader(FancyURLopener):
-    version = (
-        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
-        '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')
-
-urlretrieve = ChromeDownloader().retrieve
-
-
 class JsonStore(object):
     """Replacement of shelve using json, needed for support python 2 and 3.
     """
@@ -468,11 +456,16 @@ class Recipe(object):
         if exists(filename):
             unlink(filename)
 
-        # Clean up temporary files just in case before downloading.
-        urlcleanup()
-
         print('Downloading {0}'.format(url))
-        urlretrieve(url, filename, report_hook)
+        headers = {'User-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                   'AppleWebKit/537.36 (KHTML, like Gecko) '
+                   'Chrome/28.0.1500.71 Safari/537.36'}
+        r = requests.get(url, headers=headers)
+        with open(filename, "wb") as fw:
+            fw.write(r.content)
         return filename
 
     def extract_file(self, filename, cwd):
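
A note on the new code path: r.content buffers the entire file in memory before it is written to disk. A streamed variant would keep memory flat for large source archives; the sketch below is only a hedged alternative, not part of this commit, and download_streamed/chunk_size are illustrative names:

    import requests

    def download_streamed(url, filename, chunk_size=8192):
        # stream=True defers fetching the body; iter_content() then writes it
        # out in fixed-size chunks instead of holding it all in memory.
        r = requests.get(url, stream=True)
        r.raise_for_status()
        with open(filename, 'wb') as fw:
            for chunk in r.iter_content(chunk_size=chunk_size):
                fw.write(chunk)
        r.close()
        return filename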