Merge pull request #332 from learnleapfly/fix_urlretrieve

Fix urlretrieve issue on Python 2
This commit is contained in:
Mathieu Virbel 2019-01-31 13:18:08 +01:00 committed by GitHub
commit 88e0001ab4
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 40 additions and 3 deletions

View file

@ -1 +1,2 @@
pbxproj==2.5.1
requests>=2.13

View file

@ -18,16 +18,18 @@ import json
import shutil
import fnmatch
import tempfile
import time
from datetime import datetime
try:
from urllib.request import FancyURLopener, urlcleanup
except ImportError:
from urllib import FancyURLopener, urlcleanup
try:
from pbxproj import XcodeProject
from pbxproj.pbxextensions.ProjectFiles import FileOptions
except ImportError:
print("ERROR: pbxproj requirements is missing")
print("ERROR: Python requirements are missing")
print("To install: pip install -r requirements.txt")
sys.exit(0)
curdir = dirname(__file__)
@ -37,6 +39,7 @@ import sh
IS_PY3 = sys.version_info[0] >= 3
IS_PY2 = sys.version_info[0] == 2
def shprint(command, *args, **kwargs):
@ -63,7 +66,6 @@ def cache_execution(f):
self.update_state(key, True)
return _cache_execution
class ChromeDownloader(FancyURLopener):
version = (
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
@ -451,7 +453,41 @@ class Recipe(object):
urlcleanup()
print('Downloading {0}'.format(url))
urlretrieve(url, filename, report_hook)
attempts = 0
while True:
try:
urlretrieve(url, filename, report_hook)
except AttributeError:
if IS_PY2:
# This is caused by bug in python-future, causing occasional
# AttributeError: '_fileobject' object has no attribute 'readinto'
# It can be removed if the upstream fix is accepted. See also:
# * https://github.com/kivy/kivy-ios/issues/322
# * https://github.com/PythonCharmers/python-future/pull/423
import requests
print("Warning: urlretrieve failed. Falling back to request")
headers = {'User-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/28.0.1500.71 Safari/537.36'}
r = requests.get(url, headers=headers)
with open(filename, "wb") as fw:
fw.write(r.content)
break
else:
raise
except OSError as e:
attempts += 1
if attempts >= 5:
print('Max download attempts reached: {}'.format(attempts))
raise e
print('Download failed. Retrying in 1 second...')
time.sleep(1)
continue
break
return filename
def extract_file(self, filename, cwd):