cg2glsl no longer needed

zeromus 2015-09-28 23:38:21 -05:00
parent 69d0d813eb
commit 764befa698
27 changed files with 0 additions and 8162 deletions

Binary file not shown.

Binary file not shown.

@@ -1,10 +0,0 @@
rem http://http.developer.nvidia.com/Cg/cgc.html
rem https://developer.nvidia.com/cg-toolkit
rem https://github.com/Themaister/RetroArch/blob/master/tools/cg2glsl.py
rem https://github.com/IronLanguages/main/blob/master/Languages/IronPython/Public/Tools/Scripts/pyc.py
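rem Summary of the commands below: compile cg2glsl.py into a standalone
rem cg2glsl.exe with IronPython's pyc.py (embedding the stdlib modules it
rem imports), then stage it beside cgc.exe and the Cg runtime DLLs one
rem directory up.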
"C:\Program Files (x86)\IronPython 2.7\ipy" "C:\Program Files (x86)\IronPython 2.7\tools\scripts\pyc.py" /embed /standalone /target:exe /main:cg2glsl.py lib\_abcoll.py lib\_weakrefset.py lib\abc.py lib\bisect.py lib\collections.py lib\genericpath.py lib\heapq.py lib\keyword.py lib\linecache.py lib\ntpath.py lib\os.py lib\stat.py lib\subprocess.py lib\threading.py lib\traceback.py lib\types.py lib\UserDict.py lib\warnings.py
move /y cg2glsl.exe ..
copy /y cgc.exe ..
copy /y cg.dll ..
copy /y cgGL.dll ..

Binary file not shown.

@@ -1,756 +0,0 @@
#!/usr/bin/env python3

"""
Python 3 script which converts simple RetroArch Cg shaders to modern GLSL (ES) format.
Author: Hans-Kristian Arntzen (Themaister)
License: Public domain
"""

import sys
import os
import errno
import subprocess

batch_mode = False

def log(*arg):
    if not batch_mode:
        print(arg)

def remove_comments(source_lines):
    ret = []
    killed_comments = [line.split('//')[0] for line in source_lines]
    for i in filter(lambda line: len(line) > 0, killed_comments):
        ret.append(i)
    return ret

def keep_line_if(func, lines):
    ret = []
    for i in filter(func, lines):
        ret.append(i)
    return ret

def replace_global_in(source):
    split_source = source.split('\n')
    replace_table = [
        ('IN.video_size', 'InputSize'),
        ('IN.texture_size', 'TextureSize'),
        ('IN.output_size', 'OutputSize'),
        ('IN.frame_count', 'FrameCount'),
        ('IN.frame_direction', 'FrameDirection'),
    ]

    for line in split_source:
        if ('//var' in line) or ('#var' in line):
            for index, replace in enumerate(replace_table):
                orig = line.split(' ')[2]
                if replace[0] == orig:
                    replace_table[index] = (line.split(':')[2].split(' ')[1], replace_table[index][1])

    log('Replace globals:', replace_table)

    for replace in replace_table:
        if replace[0]:
            source = source.replace(replace[0], replace[1])

    return source

def replace_global_vertex(source):
    source = replace_global_in(source)
    replace_table = [
        ('attribute', 'COMPAT_ATTRIBUTE'),
        ('varying', 'COMPAT_VARYING'),
        ('texture2D', 'COMPAT_TEXTURE'),
        ('POSITION', 'VertexCoord'),
        ('TEXCOORD1', 'LUTTexCoord'),
        ('TEXCOORD0', 'TexCoord'),
        ('TEXCOORD', 'TexCoord'),
        ('uniform vec4 _modelViewProj1[4];', ''),
        ('_modelViewProj1', 'MVPMatrix'),
        ('_IN1._mvp_matrix[0]', 'MVPMatrix[0]'),
        ('_IN1._mvp_matrix[1]', 'MVPMatrix[1]'),
        ('_IN1._mvp_matrix[2]', 'MVPMatrix[2]'),
        ('_IN1._mvp_matrix[3]', 'MVPMatrix[3]'),
        ('FrameCount', 'float(FrameCount)'),
        ('FrameDirection', 'float(FrameDirection)'),
        ('input', 'input_dummy'),   # 'input' is reserved in GLSL.
        ('output', 'output_dummy'), # 'output' is reserved in GLSL.
    ]

    for replacement in replace_table:
        source = source.replace(replacement[0], replacement[1])

    return source

def translate_varyings(varyings, source, direction):
    dictionary = {}
    for varying in varyings:
        for line in source:
            if (varying in line) and (('//var' in line) or ('#var' in line)) and (direction in line):
                log('Found line for', varying + ':', line)
                dictionary[varying] = 'VAR' + line.split(':')[0].split('.')[-1].strip()
                break

    return dictionary

def no_uniform(elem):
    banned = [
        '_video_size',
        '_texture_size',
        '_output_size',
        '_output_dummy_size',
        '_frame_count',
        '_frame_direction',
        '_mvp_matrix',
        '_vertex_coord',
        'sampler2D'
    ]

    for ban in banned:
        if ban in elem:
            return False

    return True

def destructify_varyings(source, direction):
    # We have to change varying structs, which Cg supports, to single varyings
    # for GL. Varying structs aren't supported until later versions of GLSL.

    # Global structs are sometimes used to store temporary data.
    # Don't try to remove this as it breaks compilation.
    vout_lines = []
    for line in source:
        if (('//var' in line) or ('#var' in line)) and (('$vout.' in line) or ('$vin.' in line)):
            vout_lines.append(line)

    struct_types = []
    for line in source[1:]:
        if 'struct' in line:
            struct_type = line.split(' ')[1]
            if struct_type not in struct_types:
                struct_types.append(struct_type)

    log('Struct types:', struct_types)

    last_struct_decl_line = 0
    varyings = []
    varyings_name = []
    # Find all varyings in structs and make them "global" varyings.
    for struct in struct_types:
        for i, line in enumerate(source):
            if ('struct ' + struct) in line:
                j = i + 1
                while (j < len(source)) and ('};' not in source[j]):
                    j += 1

                lines = ['COMPAT_VARYING ' + string for string in source[i + 1 : j]]
                varyings.extend(lines)
                names = [string.strip().split(' ')[1].split(';')[0].strip() for string in source[i + 1 : j]]
                varyings_name.extend(names)
                log('Found elements in struct', struct + ':', names)
                last_struct_decl_line = j

                # Must have explicit uniform sampler2D in struct.
                for index in range(i, j + 1):
                    if 'sampler2D' in source[index]:
                        source[index] = 'float _placeholder{};'.format(index)

    varyings_tmp = varyings
    varyings = []
    variables = []

    # Don't include useless varyings like IN.video_size, IN.texture_size, etc.
    # as they are not actual varyings ...
    for i in filter(no_uniform, varyings_tmp):
        varyings.append(i)

    # Find any global variable struct that is supposed to be the output varying,
    # and redirect all references to it to the actual varyings we just declared ...
    # Globals only come before main() ...
    # Make sure to only look after all struct declarations as there might be overlap.
    for line in source[last_struct_decl_line:]:
        if 'void main()' in line:
            break

        for struct in struct_types:
            if struct in line:
                decomment_line = line.split('//')[0].strip()
                if len(decomment_line) == 0:
                    continue
                variable = decomment_line.split(' ')[1].split(';')[0]

                # Only redirect if the struct is actually used as vertex output.
                for vout_line in vout_lines:
                    if variable in vout_line:
                        log('Found struct variable for', struct + ':', variable, 'in line:', line)
                        variables.append(variable)
                        break

    varyings_dict = translate_varyings(varyings_name, source, direction)
    log('Varyings dict:', varyings_dict)

    # Append all varyings. Keep the structs as they might be used as regular values.
    for varying in varyings:
        source.insert(1, varying)

    log('Variables:', variables)
    log('Varying names:', varyings_name)

    # Replace struct access with global access, e.g. (_co1._c00 => _c00).
    # Also replace the mangled Cg name with the 'real' name.
    for index, _ in enumerate(source):
        for variable in variables:
            for varying_name in varyings_dict:
                trans_from = variable + '.' + varying_name
                trans_to = varyings_dict[varying_name]
                source[index] = source[index].replace(trans_from, trans_to)

    for index, _ in enumerate(source):
        for varying_name in varyings_name:
            if varying_name in varyings_dict:
                source[index] = source[index].replace(varying_name, varyings_dict[varying_name])

    # Replace union <struct>. Sometimes we get a collision in vertex and fragment.
    for index, line in enumerate(source):
        for struct_type in struct_types:
            line = line.replace('uniform ' + struct_type, struct_type)
        source[index] = line

    return source
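
# Rough illustration (hypothetical cgc output, not from the source): a varying
# struct such as
#     struct out_vertex { vec2 _texCoord; };
#     out_vertex _OUT;
# is flattened by destructify_varyings() into a global
#     COMPAT_VARYING vec2 VARtexCoord;
# with references like _OUT._texCoord rewritten to VARtexCoord, since varying
# structs are not supported by the GLSL versions targeted here.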

def translate_varying(cg):
    # Ye, it's ugly as shit. :(
    #log('Translate:', cg)
    translations = {
        'IN.tex_coord' : 'TexCoord',
        'IN.vertex_coord' : 'VertexCoord',
        'IN.lut_tex_coord' : 'LUTTexCoord',
        'ORIG.tex_coord' : 'OrigTexCoord',
        'PREV.tex_coord' : 'PrevTexCoord',
        'PREV1.tex_coord' : 'Prev1TexCoord',
        'PREV2.tex_coord' : 'Prev2TexCoord',
        'PREV3.tex_coord' : 'Prev3TexCoord',
        'PREV4.tex_coord' : 'Prev4TexCoord',
        'PREV5.tex_coord' : 'Prev5TexCoord',
        'PREV6.tex_coord' : 'Prev6TexCoord',
        'PASS1.tex_coord' : 'Pass1TexCoord',
        'PASS2.tex_coord' : 'Pass2TexCoord',
        'PASS3.tex_coord' : 'Pass3TexCoord',
        'PASS4.tex_coord' : 'Pass4TexCoord',
        'PASS5.tex_coord' : 'Pass5TexCoord',
        'PASS6.tex_coord' : 'Pass6TexCoord',
        'PASS7.tex_coord' : 'Pass7TexCoord',
        'PASS8.tex_coord' : 'Pass8TexCoord',
        'PASSPREV2.tex_coord' : 'PassPrev2TexCoord',
        'PASSPREV3.tex_coord' : 'PassPrev3TexCoord',
        'PASSPREV4.tex_coord' : 'PassPrev4TexCoord',
        'PASSPREV5.tex_coord' : 'PassPrev5TexCoord',
        'PASSPREV6.tex_coord' : 'PassPrev6TexCoord',
        'PASSPREV7.tex_coord' : 'PassPrev7TexCoord',
        'PASSPREV8.tex_coord' : 'PassPrev8TexCoord',
    }

    if cg in translations:
        return translations[cg]
    else:
        return cg

def translate_texture_size(cg):
    # Ye, it's ugly as shit. :(
    #log('Translate:', cg)
    translations = {
        'ORIG.texture_size' : 'OrigTextureSize',
        'PREV.texture_size' : 'PrevTextureSize',
        'PREV1.texture_size' : 'Prev1TextureSize',
        'PREV2.texture_size' : 'Prev2TextureSize',
        'PREV3.texture_size' : 'Prev3TextureSize',
        'PREV4.texture_size' : 'Prev4TextureSize',
        'PREV5.texture_size' : 'Prev5TextureSize',
        'PREV6.texture_size' : 'Prev6TextureSize',
        'PASS1.texture_size' : 'Pass1TextureSize',
        'PASS2.texture_size' : 'Pass2TextureSize',
        'PASS3.texture_size' : 'Pass3TextureSize',
        'PASS4.texture_size' : 'Pass4TextureSize',
        'PASS5.texture_size' : 'Pass5TextureSize',
        'PASS6.texture_size' : 'Pass6TextureSize',
        'PASS7.texture_size' : 'Pass7TextureSize',
        'PASS8.texture_size' : 'Pass8TextureSize',
        'PASSPREV2.texture_size' : 'PassPrev2TextureSize',
        'PASSPREV3.texture_size' : 'PassPrev3TextureSize',
        'PASSPREV4.texture_size' : 'PassPrev4TextureSize',
        'PASSPREV5.texture_size' : 'PassPrev5TextureSize',
        'PASSPREV6.texture_size' : 'PassPrev6TextureSize',
        'PASSPREV7.texture_size' : 'PassPrev7TextureSize',
        'PASSPREV8.texture_size' : 'PassPrev8TextureSize',
        'ORIG.video_size' : 'OrigInputSize',
        'PREV.video_size' : 'PrevInputSize',
        'PREV1.video_size' : 'Prev1InputSize',
        'PREV2.video_size' : 'Prev2InputSize',
        'PREV3.video_size' : 'Prev3InputSize',
        'PREV4.video_size' : 'Prev4InputSize',
        'PREV5.video_size' : 'Prev5InputSize',
        'PREV6.video_size' : 'Prev6InputSize',
        'PASS1.video_size' : 'Pass1InputSize',
        'PASS2.video_size' : 'Pass2InputSize',
        'PASS3.video_size' : 'Pass3InputSize',
        'PASS4.video_size' : 'Pass4InputSize',
        'PASS5.video_size' : 'Pass5InputSize',
        'PASS6.video_size' : 'Pass6InputSize',
        'PASS7.video_size' : 'Pass7InputSize',
        'PASS8.video_size' : 'Pass8InputSize',
        'PASSPREV2.video_size' : 'PassPrev2InputSize',
        'PASSPREV3.video_size' : 'PassPrev3InputSize',
        'PASSPREV4.video_size' : 'PassPrev4InputSize',
        'PASSPREV5.video_size' : 'PassPrev5InputSize',
        'PASSPREV6.video_size' : 'PassPrev6InputSize',
        'PASSPREV7.video_size' : 'PassPrev7InputSize',
        'PASSPREV8.video_size' : 'PassPrev8InputSize',
    }

    if cg in translations:
        return translations[cg]
    else:
        return cg

def replace_varyings(source):
    ret = []
    translations = []
    attribs = []
    uniforms = []
    for index, line in enumerate(source):
        if (('//var' in line) or ('#var' in line)) and ('$vin.' in line):
            orig = line.split(' ')[2]
            translated = translate_varying(orig)
            if translated != orig and translated not in attribs:
                cg_attrib = line.split(':')[2].split(' ')[1]
                if len(cg_attrib.strip()) > 0:
                    translations.append((cg_attrib, translated))
                    attribs.append(translated)
        elif ('//var' in line) or ('#var' in line):
            orig = line.split(' ')[2]
            translated = translate_texture_size(orig)
            if translated != orig and translated not in uniforms:
                cg_uniform = line.split(':')[2].split(' ')[1]
                if len(cg_uniform.strip()) > 0:
                    translations.append((cg_uniform, translated))
                    uniforms.append(translated)

    for index, line in enumerate(source):
        if 'void main()' in line:
            for attrib in attribs:
                if attrib == 'VertexCoord':
                    source.insert(index, 'COMPAT_ATTRIBUTE vec4 ' + attrib + ';')
                else:
                    source.insert(index, 'COMPAT_ATTRIBUTE vec2 ' + attrib + ';')
            for uniform in uniforms:
                source.insert(index, 'uniform COMPAT_PRECISION vec2 ' + uniform + ';')
            break

    for line in source:
        for trans in translations:
            line = line.replace(trans[0], trans[1])
        ret.append(line)

    return ret

def hack_source_vertex(source):
    ref_index = 0
    for index, line in enumerate(source):
        if 'void main()' in line:
            source.insert(index, 'uniform COMPAT_PRECISION vec2 InputSize;')
            source.insert(index, 'uniform COMPAT_PRECISION vec2 TextureSize;')
            source.insert(index, 'uniform COMPAT_PRECISION vec2 OutputSize;')
            source.insert(index, 'uniform int FrameCount;')
            source.insert(index, 'uniform int FrameDirection;')
            source.insert(index, 'uniform mat4 MVPMatrix;')
            ref_index = index
            break

    # Fix samplers in vertex shader (supported by GLSL).
    translations = []
    added_samplers = []
    translated_samplers = []
    struct_texunit0 = False  # If True, we have to append uniform sampler2D Texture manually ...
    for line in source:
        if ('TEXUNIT0' in line) and ('semantic' not in line):
            main_sampler = (line.split(':')[2].split(' ')[1], 'Texture')
            if len(main_sampler[0]) > 0:
                translations.append(main_sampler)
                log('Vertex: Sampler:', main_sampler[0], '->', main_sampler[1])
                struct_texunit0 = '.' in main_sampler[0]
        elif ('//var sampler2D' in line) or ('#var sampler2D' in line):
            cg_texture = line.split(' ')[2]
            translated = translate_texture(cg_texture)
            orig_name = translated
            new_name = line.split(':')[2].split(' ')[1]
            log('Vertex: Sampler:', new_name, '->', orig_name)
            if len(new_name) > 0:
                if translated != cg_texture and translated not in translated_samplers:
                    translated_samplers.append(translated)
                    added_samplers.append('uniform sampler2D ' + translated + ';')
                translations.append((new_name, orig_name))

    for sampler in added_samplers:
        source.insert(ref_index, sampler)

    if struct_texunit0:
        source.insert(ref_index, 'uniform sampler2D Texture;')

    for index, line in enumerate(source):
        for translation in translations:
            source[index] = source[index].replace(translation[0], translation[1])

    source = destructify_varyings(source, '$vout.')
    source = replace_varyings(source)
    return source

def replace_global_fragment(source):
    source = replace_global_in(source)
    replace_table = [
        ('varying', 'COMPAT_VARYING'),
        ('texture2D', 'COMPAT_TEXTURE'),
        ('FrameCount', 'float(FrameCount)'),
        ('FrameDirection', 'float(FrameDirection)'),
        ('input', 'input_dummy'),   # 'input' is reserved in GLSL.
        ('output', 'output_dummy'), # 'output' is reserved in GLSL.
        ('gl_FragColor', 'FragColor'),
    ]

    for replacement in replace_table:
        source = source.replace(replacement[0], replacement[1])

    return source

def translate_texture(cg):
    log('Translate:', cg)
    translations = {
        'ORIG.texture' : 'OrigTexture',
        'PREV.texture' : 'PrevTexture',
        'PREV1.texture' : 'Prev1Texture',
        'PREV2.texture' : 'Prev2Texture',
        'PREV3.texture' : 'Prev3Texture',
        'PREV4.texture' : 'Prev4Texture',
        'PREV5.texture' : 'Prev5Texture',
        'PREV6.texture' : 'Prev6Texture',
        'PASS1.texture' : 'Pass1Texture',
        'PASS2.texture' : 'Pass2Texture',
        'PASS3.texture' : 'Pass3Texture',
        'PASS4.texture' : 'Pass4Texture',
        'PASS5.texture' : 'Pass5Texture',
        'PASS6.texture' : 'Pass6Texture',
        'PASS7.texture' : 'Pass7Texture',
        'PASS8.texture' : 'Pass8Texture',
        'PASSPREV2.texture' : 'PassPrev2Texture',
        'PASSPREV3.texture' : 'PassPrev3Texture',
        'PASSPREV4.texture' : 'PassPrev4Texture',
        'PASSPREV5.texture' : 'PassPrev5Texture',
        'PASSPREV6.texture' : 'PassPrev6Texture',
        'PASSPREV7.texture' : 'PassPrev7Texture',
        'PASSPREV8.texture' : 'PassPrev8Texture',
    }

    if cg in translations:
        return translations[cg]
    else:
        return cg

def hack_source_fragment(source):
    ref_index = 0
    for index, line in enumerate(source):
        if 'void main()' in line:
            source.insert(index, 'uniform COMPAT_PRECISION vec2 InputSize;')
            source.insert(index, 'uniform COMPAT_PRECISION vec2 TextureSize;')
            source.insert(index, 'uniform COMPAT_PRECISION vec2 OutputSize;')
            source.insert(index, 'uniform int FrameCount;')
            source.insert(index, 'uniform int FrameDirection;')
            ref_index = index
            break

    translations = []
    added_samplers = []
    translated_samplers = []
    uniforms = []
    struct_texunit0 = False  # If True, we have to append uniform sampler2D Texture manually ...
    for line in source:
        if ('TEXUNIT0' in line) and ('semantic' not in line):
            main_sampler = (line.split(':')[2].split(' ')[1], 'Texture')
            if len(main_sampler[0]) > 0:
                translations.append(main_sampler)
                log('Fragment: Sampler:', main_sampler[0], '->', main_sampler[1])
                struct_texunit0 = '.' in main_sampler[0]
        elif ('//var sampler2D' in line) or ('#var sampler2D' in line):
            cg_texture = line.split(' ')[2]
            translated = translate_texture(cg_texture)
            orig_name = translated
            new_name = line.split(':')[2].split(' ')[1]
            log('Fragment: Sampler:', new_name, '->', orig_name)
            if len(new_name) > 0:
                if translated != cg_texture and translated not in translated_samplers:
                    translated_samplers.append(translated)
                    added_samplers.append('uniform sampler2D ' + translated + ';')
                translations.append((new_name, orig_name))
        elif ('//var' in line) or ('#var' in line):
            orig = line.split(' ')[2]
            translated = translate_texture_size(orig)
            if translated != orig and translated not in uniforms:
                cg_uniform = line.split(':')[2].split(' ')[1]
                if len(cg_uniform.strip()) > 0:
                    translations.append((cg_uniform, translated))
                    uniforms.append(translated)

    for sampler in added_samplers:
        source.insert(ref_index, sampler)
    for uniform in uniforms:
        source.insert(ref_index, 'uniform COMPAT_PRECISION vec2 ' + uniform + ';')
    if struct_texunit0:
        source.insert(ref_index, 'uniform sampler2D Texture;')

    ret = []
    for line in source:
        for translation in translations:
            log('Translation:', translation[0], '->', translation[1])
            line = line.replace(translation[0], translation[1])
        ret.append(line)

    ret = destructify_varyings(ret, '$vin.')
    return ret

def validate_shader(source, target):
    log('Shader:')
    log('===')
    log(source)
    log('===')

    command = ['cgc', '-noentry', '-ogles']
    p = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    stdout_ret, stderr_ret = p.communicate(source.encode())

    log('CGC:', stderr_ret.decode())

    return p.returncode == 0

def preprocess_vertex(source_data):
    input_data = source_data.split('\n')
    ret = []
    for line in input_data:
        if 'uniform float4x4' in line:
            ret.append('#pragma pack_matrix(column_major)\n')
            ret.append(line)
            ret.append('#pragma pack_matrix(row_major)\n')
        else:
            ret.append(line)
    return '\n'.join(ret)
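
# For example, preprocess_vertex() turns a (hypothetical) input line
#     uniform float4x4 modelViewProj;
# into
#     #pragma pack_matrix(column_major)
#     uniform float4x4 modelViewProj;
#     #pragma pack_matrix(row_major)
# so cgc treats just that declaration as column-major while the rest of the
# file keeps the default row-major packing.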

def convert(source, dest):
    vert_cmd = ['cgc', '-profile', 'glesv', '-entry', 'main_vertex']
    with open(source, 'r') as f:
        source_data = f.read()
    p = subprocess.Popen(vert_cmd, stdin = subprocess.PIPE, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
    source_data = preprocess_vertex(source_data)
    vertex_source, stderr_ret = p.communicate(input = source_data.encode())
    log(stderr_ret.decode())
    vertex_source = vertex_source.decode()

    if p.returncode != 0:
        log('Vertex compilation failed ...')
        return 1

    frag_cmd = ['cgc', '-profile', 'glesf', '-entry', 'main_fragment', source]
    p = subprocess.Popen(frag_cmd, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
    fragment_source, stderr_ret = p.communicate()
    log(stderr_ret.decode())
    fragment_source = fragment_source.decode()

    if p.returncode != 0:
        log('Fragment compilation failed ...')
        return 1

    vertex_source = replace_global_vertex(vertex_source)
    fragment_source = replace_global_fragment(fragment_source)

    vertex_source = vertex_source.split('\n')
    fragment_source = fragment_source.split('\n')

    # Cg thinks we're using row-major matrices, but we're using column-major.
    # Also, Cg tends to compile matrix multiplications as dot products in GLSL.
    # Hack in a fix for this.
    log('Hacking vertex')
    vertex_source = hack_source_vertex(vertex_source)
    log('Hacking fragment')
    fragment_source = hack_source_fragment(fragment_source)

    # We compile to GLES, but we really just want modern GL ...
    vertex_source = keep_line_if(lambda line: 'precision' not in line, vertex_source)
    fragment_source = keep_line_if(lambda line: 'precision' not in line, fragment_source)

    # Kill all comments. Cg adds lots of useless comments.
    # Remove the first line. It contains the name of the Cg program.
    vertex_source = remove_comments(vertex_source[1:])
    fragment_source = remove_comments(fragment_source[1:])

    vert_hacks = []
    vert_hacks.append('''
"""
#if __VERSION__ >= 130
#define COMPAT_VARYING out
#define COMPAT_ATTRIBUTE in
#define COMPAT_TEXTURE texture
#else
#define COMPAT_VARYING varying
#define COMPAT_ATTRIBUTE attribute
#define COMPAT_TEXTURE texture2D
#endif
#ifdef GL_ES
#define COMPAT_PRECISION mediump
#else
#define COMPAT_PRECISION
#endif''')

    out_vertex = '\n'.join(vert_hacks + vertex_source)

    frag_hacks = []
    frag_hacks.append('''
#if __VERSION__ >= 130
#define COMPAT_VARYING in
#define COMPAT_TEXTURE texture
out vec4 FragColor;
#else
#define COMPAT_VARYING varying
#define FragColor gl_FragColor
#define COMPAT_TEXTURE texture2D
#endif
#ifdef GL_ES
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
#define COMPAT_PRECISION mediump
#else
#define COMPAT_PRECISION
#endif''')

    out_fragment = '\n'.join(frag_hacks + fragment_source)

    if not validate_shader(out_vertex, 'glesv'):
        log('Vertex shader does not compile ...')
        return 1

    if not validate_shader(out_fragment, 'glesf'):
        log('Fragment shader does not compile ...')
        return 1

    with open(dest, 'w') as f:
        f.write('// GLSL shader autogenerated by cg2glsl.py.\n')
        f.write('#if defined(VERTEX)\n')
        f.write(out_vertex)
        f.write('\n')
        f.write('#elif defined(FRAGMENT)\n')
        f.write(out_fragment)
        f.write('\n')
        f.write('#endif\n')

    return 0
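
# Note on the output layout above: both stages land in one .glsl file, guarded
# by #if defined(VERTEX) / #elif defined(FRAGMENT), which is the single-file
# format RetroArch's GLSL shader loader expects.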

def convert_cgp(source, dest):
    string = ''
    with open(source, 'r') as f:
        string = f.read().replace('.cg', '.glsl')
    with open(dest, 'w') as f:
        f.write(string)

def path_ext(path):
    _, ext = os.path.splitext(path)
    return ext

def convert_path(source, source_dir, dest_dir, conv):
    index = 0 if source_dir[-1] == '/' else 1
    return os.path.join(dest_dir, source.replace(source_dir, '')[index:]).replace(conv[0], conv[1])

def main():
    if len(sys.argv) != 3:
        print('Usage: {} prog.cg(p) prog.glsl(p)'.format(sys.argv[0]))
        print('Batch mode usage: {} cg-dir out-xml-shader-dir'.format(sys.argv[0]))
        return 1

    if os.path.isdir(sys.argv[1]):
        global batch_mode
        batch_mode = True
        try:
            os.makedirs(sys.argv[2])
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        failed_cnt = 0
        success_cnt = 0
        failed_files = []
        for dirname, _, filenames in os.walk(sys.argv[1]):
            for source in filter(lambda path: path_ext(path) == '.cg', [os.path.join(dirname, filename) for filename in filenames]):
                dest = convert_path(source, sys.argv[1], sys.argv[2], ('.cg', '.glsl'))
                dirpath = os.path.split(dest)[0]
                print('Dirpath:', dirpath)
                if not os.path.isdir(dirpath):
                    try:
                        os.makedirs(dirpath)
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            raise

                try:
                    ret = convert(source, dest)
                    print(source, '->', dest, '...', 'succeeded!' if ret == 0 else 'failed!')
                    if ret == 0:
                        success_cnt += 1
                    else:
                        failed_cnt += 1
                        failed_files.append(source)
                except Exception as e:
                    print(e)
                    failed_files.append(source)
                    failed_cnt += 1

            for source in filter(lambda path: path_ext(path) == '.cgp', [os.path.join(dirname, filename) for filename in filenames]):
                dest = convert_path(source, sys.argv[1], sys.argv[2], ('.cgp', '.glslp'))
                dirpath = os.path.split(dest)[0]
                print('Dirpath:', dirpath)
                if not os.path.isdir(dirpath):
                    try:
                        os.makedirs(dirpath)
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            raise

                try:
                    convert_cgp(source, dest)
                    success_cnt += 1
                except Exception as e:
                    print(e)
                    failed_files.append(source)
                    failed_cnt += 1

        print(success_cnt, 'shaders converted successfully.')
        print(failed_cnt, 'shaders failed.')

        if failed_cnt > 0:
            print('Failed shaders:')
            for path in failed_files:
                print(path)
    else:
        source = sys.argv[1]
        dest = sys.argv[2]
        if path_ext(source) == '.cgp':
            sys.exit(convert_cgp(source, dest))
        else:
            sys.exit(convert(source, dest))

if __name__ == '__main__':
    sys.exit(main())
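
# Typical invocations, mirroring the usage text in main() above (shader file
# names here are hypothetical examples):
#     cg2glsl.py some-shader.cg some-shader.glsl     converts a single shader
#     cg2glsl.py some-preset.cgp some-preset.glslp   converts a preset
#     cg2glsl.py cg-dir/ glsl-dir/                   batch-converts a whole tree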

@@ -1,180 +0,0 @@
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
__hash__ = None # Avoid Py3k warning
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return key in self.data
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if key not in self:
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if key not in self:
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())

@@ -1,601 +0,0 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.

DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""

from abc import ABCMeta, abstractmethod
import sys

__all__ = ["Hashable", "Iterable", "Iterator",
           "Sized", "Container", "Callable",
           "Set", "MutableSet",
           "Mapping", "MutableMapping",
           "MappingView", "KeysView", "ItemsView", "ValuesView",
           "Sequence", "MutableSequence",
           ]

### ONE-TRICK PONIES ###

def _hasattr(C, attr):
    try:
        return any(attr in B.__dict__ for B in C.__mro__)
    except AttributeError:
        # Old-style class
        return hasattr(C, attr)

class Hashable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Hashable:
            try:
                for B in C.__mro__:
                    if "__hash__" in B.__dict__:
                        if B.__dict__["__hash__"]:
                            return True
                        break
            except AttributeError:
                # Old-style class
                if getattr(C, "__hash__", None):
                    return True
        return NotImplemented

class Iterable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __iter__(self):
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if _hasattr(C, "__iter__"):
                return True
        return NotImplemented

Iterable.register(str)

class Iterator(Iterable):
    @abstractmethod
    def next(self):
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            if _hasattr(C, "next") and _hasattr(C, "__iter__"):
                return True
        return NotImplemented

class Sized:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if _hasattr(C, "__len__"):
                return True
        return NotImplemented

class Container:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if _hasattr(C, "__contains__"):
                return True
        return NotImplemented

class Callable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if _hasattr(C, "__call__"):
                return True
        return NotImplemented

### SETS ###

class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    def __le__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True

    def __lt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)

    def __gt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return other < self

    def __ge__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return other <= self

    def __eq__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)

    def __and__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)

    def isdisjoint(self, other):
        for value in other:
            if value in self:
                return False
        return True

    def __or__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)

    def __sub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)

    def __xor__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)

    # Sets are not hashable by default, but subclasses can change this
    __hash__ = None

    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible with __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__. We match the algorithm used
        by the built-in frozenset type.
        """
        MAX = sys.maxint
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            h = 590923713
        return h

Set.register(frozenset)

class MutableSet(Set):
    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
        """Remove an element. Do not raise an exception if absent."""
        raise NotImplementedError

    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)

    def pop(self):
        """Return the popped value. Raise KeyError if empty."""
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value

    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass

    def __ior__(self, it):
        for value in it:
            self.add(value)
        return self

    def __iand__(self, it):
        for value in (self - it):
            self.discard(value)
        return self

    def __ixor__(self, it):
        if it is self:
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self

    def __isub__(self, it):
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self

MutableSet.register(set)

### MAPPINGS ###

class Mapping(Sized, Iterable, Container):
    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def iterkeys(self):
        return iter(self)

    def itervalues(self):
        for key in self:
            yield self[key]

    def iteritems(self):
        for key in self:
            yield (key, self[key])

    def keys(self):
        return list(self)

    def items(self):
        return [(key, self[key]) for key in self]

    def values(self):
        return [self[key] for key in self]

    # Mappings are not hashable by default, but subclasses can change this
    __hash__ = None

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)

class MappingView(Sized):
    def __init__(self, mapping):
        self._mapping = mapping

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)

class KeysView(MappingView, Set):
    @classmethod
    def _from_iterable(self, it):
        return set(it)

    def __contains__(self, key):
        return key in self._mapping

    def __iter__(self):
        for key in self._mapping:
            yield key

class ItemsView(MappingView, Set):
    @classmethod
    def _from_iterable(self, it):
        return set(it)

    def __contains__(self, item):
        key, value = item
        try:
            v = self._mapping[key]
        except KeyError:
            return False
        else:
            return v == value

    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])

class ValuesView(MappingView):
    def __contains__(self, value):
        for key in self._mapping:
            if value == self._mapping[key]:
                return True
        return False

    def __iter__(self):
        for key in self._mapping:
            yield self._mapping[key]

class MutableMapping(Mapping):
    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    __marker = object()

    def pop(self, key, default=__marker):
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()

        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

MutableMapping.register(dict)

### SEQUENCES ###

class Sequence(Sized, Iterable, Container):
    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return

    def __contains__(self, value):
        for v in self:
            if v == value:
                return True
        return False

    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]

    def index(self, value):
        for i, v in enumerate(self):
            if v == value:
                return i
        raise ValueError

    def count(self, value):
        return sum(1 for v in self if v == value)

Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)

class MutableSequence(Sequence):
    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        raise IndexError

    def append(self, value):
        self.insert(len(self), value)

    def reverse(self):
        n = len(self)
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]

    def extend(self, values):
        for v in values:
            self.append(v)

    def pop(self, index=-1):
        v = self[index]
        del self[index]
        return v

    def remove(self, value):
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)

@@ -1,216 +0,0 @@
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.

from _weakref import ref

__all__ = ['WeakSet']

class _IterationGuard(object):
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).

    def __init__(self, weakcontainer):
        # Don't create cycles
        self.weakcontainer = ref(weakcontainer)

    def __enter__(self):
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self

    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            if not s:
                w._commit_removals()

class WeakSet(object):
    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())

    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    yield item

    def __len__(self):
        return sum(x() is not None for x in self.data)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            return False
        return wr in self.data

    def __reduce__(self):
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    __hash__ = None

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if isinstance(other, self.__class__):
            self.data.update(other.data)
        else:
            for element in other:
                self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    # Helper functions for simple delegating methods.
    def _apply(self, other, method):
        if not isinstance(other, self.__class__):
            other = self.__class__(other)
        newdata = method(other.data)
        newset = self.__class__()
        newset.data = newdata
        return newset

    def difference(self, other):
        return self._apply(other, self.data.difference)
    __sub__ = difference

    def difference_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self._apply(other, self.data.intersection)
    __and__ = intersection

    def intersection_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __lt__ = issubset

    def __le__(self, other):
        return self.data <= set(ref(item) for item in other)

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __gt__ = issuperset

    def __ge__(self, other):
        return self.data >= set(ref(item) for item in other)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)

    def symmetric_difference(self, other):
        return self._apply(other, self.data.symmetric_difference)
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item) for item in other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item) for item in other)
        return self

    def union(self, other):
        return self._apply(other, self.data.union)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0

@@ -1,185 +0,0 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) according to PEP 3119."""

import types

from _weakrefset import WeakSet

# Instance of old-style class
class _C: pass
_InstanceType = type(_C())

def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it. A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract methods are overridden.
    The abstract methods can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj

class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it. A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract properties are overridden.
    The abstract properties can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; you can also define a read-write
    abstract property using the 'long' form of property declaration:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    __isabstractmethod__ = True

class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC. An ABC can be subclassed
    directly, and then acts as a mix-in class. You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything. It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names
        abstracts = set(name
                        for name, value in namespace.items()
                        if getattr(value, "__isabstractmethod__", False))
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
        print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print >> file, "%s: %r" % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False

@@ -1,92 +0,0 @@
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
# Overwrite above definitions with a fast C implementation
try:
from _bisect import *
except ImportError:
pass

@@ -1,667 +0,0 @@
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[PREV]
last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
NEXT, KEY = 1, 2
root = self.__root
curr = root[NEXT]
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
PREV, KEY = 0, 2
root = self.__root
curr = root[PREV]
while curr is not root:
yield curr[KEY]
curr = curr[PREV]
def clear(self):
'od.clear() -> None. Remove all items from od.'
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
dict.clear(self)
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
'od.__ne__(y) <==> od!=y'
return not self == other
# -- the following methods support python 3.x style dictionary views --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
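# Editor's note: the sketch below is an illustrative addition, not part of
# the original module. It exercises the sentinel-based doubly linked list
# maintained by __setitem__/__delitem__ above; the _demo_* name is new.
def _demo_ordereddict():
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert list(od) == ['a', 'b', 'c']   # insertion order is preserved
    od['a'] = 10                         # reassignment keeps the old position
    assert list(od) == ['a', 'b', 'c']
    del od['b']                          # O(1) unlink through the key->link map
    od['b'] = 2                          # re-insertion appends at the end
    assert list(od) == ['a', 'c', 'b']
    assert od.popitem() == ('b', 2)      # LIFO by default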
################################################################################
### namedtuple
################################################################################
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
'Create new instance of %(typename)s(%(argtxt)s)'
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
'Return a nicely formatted representation string'
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
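# Editor's note: illustrative addition, not part of the original module.
# The rename=True path above substitutes positional names for field names
# that are invalid, keywords, or duplicates; the _demo_* name is new.
def _demo_namedtuple_rename():
    Row = namedtuple('Row', 'id class id', rename=True)
    assert Row._fields == ('id', '_1', '_2')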
########################################################################
### Counter
########################################################################
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. If given an input iterable,
count its elements; or initialize the counts from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super(Counter, self).__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in some of the original untouched counts
# being mixed in with all of the other counts for a mishmash that
# doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super(Counter, self).__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract counts, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
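# Editor's note: illustrative addition, not part of the original module.
# As the comment above the operator section says, adding an empty Counter
# is the idiom for stripping zero and negative counts, because __add__
# keeps only positive results; the _demo_* name is new.
def _demo_counter_normalize():
    c = Counter(a=2, b=0, c=-1)
    c += Counter()                      # drops 'b' (zero) and 'c' (negative)
    assert c == Counter(a=2)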
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print Point3D.__doc__
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())
@ -1,105 +0,0 @@
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except os.error:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
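# Editor's note: illustrative addition, not part of the original module.
# commonprefix() compares raw characters, so the result need not end on a
# path separator; the _demo_* name is new.
def _demo_commonprefix():
    assert commonprefix(['/usr/lib', '/usr/local']) == '/usr/l'
    assert commonprefix([]) == ''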
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, ''
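# Editor's note: illustrative addition, not part of the original module.
# With posix-style arguments, leading dots stay with the root and only the
# last extension is split off; the _demo_* name is new.
def _demo_splitext():
    assert _splitext('.bashrc', '/', None, '.') == ('.bashrc', '')
    assert _splitext('archive.tar.gz', '/', None, '.') == ('archive.tar', '.gz')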
@ -1,442 +0,0 @@
# -*- coding: latin-1 -*-
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all times, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to do that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, repeat, count, imap, izip, tee, chain
from operator import itemgetter
import bisect
def cmp_lt(x, y):
# Use __lt__ if available; otherwise, try __le__.
# In Py3.x, only __lt__ will be called.
return (x < y) if hasattr(x, '__lt__') else (not y <= x)
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and cmp_lt(heap[0], item):
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(xrange(n//2)):
_siftup(x, i)
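# Editor's note: illustrative addition, not part of the original module.
# The invariant heap[k] <= heap[2*k+1] and heap[k] <= heap[2*k+2] keeps the
# smallest item at heap[0], so repeated heappop() yields a sorted sequence;
# the _demo_* name is new.
def _demo_heap_basics():
    h = []
    for x in [5, 1, 4, 2, 3]:
        heappush(h, x)
    assert h[0] == 1                    # root is always the minimum
    out = []
    while h:
        out.append(heappop(h))
    assert out == [1, 2, 3, 4, 5]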
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
# For smaller values of n, the bisect method is faster than a minheap.
# It is also memory efficient, consuming only n elements of space.
it = iter(iterable)
result = sorted(islice(it, 0, n))
if not result:
return result
insort = bisect.insort
pop = result.pop
los = result[-1] # los --> Largest of the nsmallest
for elem in it:
if cmp_lt(elem, los):
insort(result, elem)
pop()
los = result[-1]
return result
# An alternative approach manifests the whole iterable in memory but
# saves comparisons by heapifying all at once. Also, saves time
# over bisect.insort() which has O(n) data movement time for every
# insertion. Finding the n smallest of an m length iterable requires
# O(m) + O(n log m) comparisons.
h = list(iterable)
heapify(h)
return map(heappop, repeat(h, min(n, len(h))))
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if cmp_lt(newitem, parent):
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom __cmp__ methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not cmp_lt(heap[childpos], heap[rightpos]):
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.next
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while 1:
try:
while 1:
v, itnum, next = s = h[0] # raises IndexError when h is empty
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
except IndexError:
return
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = izip(iterable, count()) # decorate
result = _nsmallest(n, it)
return map(itemgetter(0), result) # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = izip(imap(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return map(itemgetter(2), result) # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = izip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return map(itemgetter(0), result) # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = izip(imap(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return map(itemgetter(2), result) # undecorate
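# Editor's note: illustrative addition, not part of the original module.
# The key= wrappers above decorate each item as (key(item), index, item),
# run the plain selection, then undecorate; the index breaks ties in favor
# of earlier items. The _demo_* name is new.
def _demo_key_selection():
    words = ['pear', 'fig', 'banana', 'kiwi']
    assert nsmallest(2, words, key=len) == ['fig', 'pear']  # tie: 'pear' first
    assert nlargest(1, words, key=len) == ['banana']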
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print sort
import doctest
doctest.testmod()
@ -1,93 +0,0 @@
#! /usr/bin/env python
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'exec',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'not',
'or',
'pass',
'print',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
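# Editor's note: illustrative addition, not part of the original file.
# iskeyword is the bound __contains__ of a frozenset, so keyword tests are
# O(1) set lookups; the _demo_* name is new.
def _demo_iskeyword():
    assert iskeyword('for')
    assert not iskeyword('spam')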
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
fp = open(iptfile)
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
fp.close()
lines.sort()
# load the output skeleton from the target
fp = open(optfile)
format = fp.readlines()
fp.close()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
@ -1,135 +0,0 @@
"""Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
# The cache
cache = {} # The cache
def clearcache():
"""Clear the cache entirely."""
global cache
cache = {}
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
return updatecache(filename, module_globals)
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = cache.keys()
else:
if filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del cache[filename]
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename]
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
fullname = filename
try:
stat = os.stat(fullname)
except OSError:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data), None,
[line+'\n' for line in data.splitlines()], fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
# When using imputil, sys.path may contain things other than
# strings; ignore them when it happens.
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
return []
try:
with open(fullname, 'rU') as fp:
lines = fp.readlines()
except IOError:
return []
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
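# Editor's note: illustrative addition, not part of the original module.
# Each cache entry is a (size, mtime, lines, fullname) tuple keyed by
# filename; checkcache() re-stats the file and drops stale entries. The
# _demo_* name is new, and `path` is assumed to name an existing text file.
def _demo_linecache(path):
    first = getline(path, 1)    # fills the cache on the first call
    checkcache(path)            # evicts the entry if the file changed on disk
    return first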
@ -1,523 +0,0 @@
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') == 'd:/'
# 5. join('c:/', 'd:/') == 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
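# Editor's note: illustrative addition, not part of the original module.
# It checks the drive-letter cases enumerated in the comments above; the
# _demo_* name is new.
def _demo_join():
    assert join('c:', '/a') == 'c:/a'       # case 1
    assert join('c:/a', '/b') == '/b'       # case 3
    assert join('c:', 'd:/') == 'd:/'       # case 4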
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have a UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or a UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though this is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
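# Editor's note: illustrative addition, not part of the original module; it
# assumes the variable 'no_such_var' is not set in the environment. The
# _demo_* name is new.
def _demo_expandvars():
    assert expandvars('$$') == '$'                          # '$$' -> '$'
    assert expandvars('%no_such_var%') == '%no_such_var%'   # unknown: unchanged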
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
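# Editor's note: illustrative addition, not part of the original module.
# normpath() maps slashes to backslashes and collapses '.', empty, and
# resolvable '..' components, while unresolvable leading '..' components
# survive; the _demo_* name is new.
def _demo_normpath():
    assert normpath('A//B/./C/../D') == 'A\\B\\D'
    assert normpath('..\\x') == '..\\x'     # leading '..' is preserved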
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
@ -1,759 +0,0 @@
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
try:
bs = b""
while n > len(bs):
bs += read(_urandomfd, n - len(bs))
finally:
close(_urandomfd)
return bs
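# Illustrative sketch (added example): urandom() always returns exactly
# n bytes, looping over read() until the request is satisfied.
def _example_urandom():
    token = urandom(16)
    assert len(token) == 16
    return token.encode('hex')             # printable form (Python 2)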

View File

@ -1,94 +0,0 @@
"""Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
return mode & 07777
def S_IFMT(mode):
return mode & 0170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFBLK = 0060000
S_IFREG = 0100000
S_IFIFO = 0010000
S_IFLNK = 0120000
S_IFSOCK = 0140000
# Functions to test for each file type
def S_ISDIR(mode):
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 04000
S_ISGID = 02000
S_ENFMT = S_ISGID
S_ISVTX = 01000
S_IREAD = 00400
S_IWRITE = 00200
S_IEXEC = 00100
S_IRWXU = 00700
S_IRUSR = 00400
S_IWUSR = 00200
S_IXUSR = 00100
S_IRWXG = 00070
S_IRGRP = 00040
S_IWGRP = 00020
S_IXGRP = 00010
S_IRWXO = 00007
S_IROTH = 00004
S_IWOTH = 00002
S_IXOTH = 00001
# Names for file flags
UF_NODUMP = 0x00000001
UF_IMMUTABLE = 0x00000002
UF_APPEND = 0x00000004
UF_OPAQUE = 0x00000008
UF_NOUNLINK = 0x00000010
SF_ARCHIVED = 0x00010000
SF_IMMUTABLE = 0x00020000
SF_APPEND = 0x00040000
SF_NOUNLINK = 0x00100000
SF_SNAPSHOT = 0x00200000
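# Illustrative sketch (added example): combining the helpers above to
# classify a mode word and pull out its permission bits.
def _example_classify(mode):
    if S_ISDIR(mode):
        kind = 'directory'
    elif S_ISREG(mode):
        kind = 'regular file'
    else:
        kind = 'other'
    return kind, oct(S_IMODE(mode))        # e.g. ('regular file', '0644')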

File diff suppressed because it is too large

View File

@ -1,989 +0,0 @@
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from collections import deque
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
self.notify(len(self.__waiters))
notify_all = notifyAll
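# Illustrative sketch (added example): the canonical consumer pattern --
# re-test the predicate in a loop, because wait() may return before it
# holds.
def _example_consume(cond, items):
    cond.acquire()
    try:
        while not items:                   # predicate loop
            cond.wait()
        return items.pop(0)
    finally:
        cond.release()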
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
rc = False
self.__cond.acquire()
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
self.__cond.release()
return rc
__enter__ = acquire
def release(self):
self.__cond.acquire()
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
self.__cond.release()
def __exit__(self, t, v, tb):
self.release()
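# Illustrative sketch (added example): a semaphore caps the number of
# simultaneous holders; a non-blocking acquire reports failure instead
# of waiting.
def _example_semaphore():
    gate = Semaphore(2)
    gate.acquire()                         # value 2 -> 1
    gate.acquire()                         # value 1 -> 0
    ok = gate.acquire(blocking=0)          # exhausted: returns False
    gate.release()
    gate.release()
    return ok                              # -> False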
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._Semaphore__value >= self._initial_value:
raise ValueError, "Semaphore released too many times"
return _Semaphore.release(self)
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self.__cond.__init__()
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
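# Illustrative sketch (added example): an Event is the simplest one-shot
# signal between threads (Thread itself is defined further below).
def _example_event():
    done = Event()
    def worker():
        done.set()                         # signal completion
    Thread(target=worker).start()
    done.wait(5.0)                         # block up to five seconds
    return done.is_set()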
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
self.__block.__init__()
self.__started._reset_internal_locks()
@property
def _block(self):
# used by a unittest
return self.__block
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
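# Illustrative sketch (added example): the basic lifecycle -- construct,
# start(), join(); afterwards is_alive() is False.
def _example_thread():
    results = []
    t = Thread(target=results.append, args=(42,))
    t.start()
    t.join()
    return results, t.is_alive()           # -> ([42], False)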
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.itervalues():
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
# Any condition variables hanging off of the active thread may
# be in an invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
thread._reset_internal_locks()
new_active[ident] = thread
else:
# All the others are already stopped.
# We don't call _Thread__stop() because it tries to acquire
# thread._Thread__block which could also have been held while
# we forked.
thread._Thread__stopped = True
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()

View File

@ -1,319 +0,0 @@
"""Extract, format and print information about Python stack traces."""
import linecache
import sys
import types
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb', 'tb_lineno']
def _print(file, str='', terminator='\n'):
file.write(str+terminator)
def print_list(extracted_list, file=None):
"""Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file."""
if file is None:
file = sys.stderr
for filename, lineno, name, line in extracted_list:
_print(file,
' File "%s", line %d, in %s' % (filename,lineno,name))
if line:
_print(file, ' %s' % line.strip())
def format_list(extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
"""
list = []
for filename, lineno, name, line in extracted_list:
item = ' File "%s", line %d, in %s\n' % (filename,lineno,name)
if line:
item = item + ' %s\n' % line.strip()
list.append(item)
return list
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
if file is None:
file = sys.stderr
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
_print(file,
' File "%s", line %d, in %s' % (filename, lineno, name))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: _print(file, ' ' + line.strip())
tb = tb.tb_next
n = n+1
def format_tb(tb, limit = None):
"""A shorthand for 'format_list(extract_stack(f, limit))."""
return format_list(extract_tb(tb, limit))
def extract_tb(tb, limit = None):
"""Return list of up to limit pre-processed entries from traceback.
This is useful for alternate formatting of stack traces. If
'limit' is omitted or None, all entries are extracted. A
pre-processed stack trace entry is a quadruple (filename, line
number, function name, text) representing the information that is
usually printed for a stack trace. The text is a string with
leading and trailing whitespace stripped; if the source is not
available it is None.
"""
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
list = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
tb = tb.tb_next
n = n+1
return list
def print_exception(etype, value, tb, limit=None, file=None):
"""Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
"""
if file is None:
file = sys.stderr
if tb:
_print(file, 'Traceback (most recent call last):')
print_tb(tb, limit, file)
lines = format_exception_only(etype, value)
for line in lines:
_print(file, line, '')
def format_exception(etype, value, tb, limit = None):
"""Format a stack trace and the exception information.
The arguments have the same meaning as the corresponding arguments
to print_exception(). The return value is a list of strings, each
ending in a newline and some containing internal newlines. When
these lines are concatenated and printed, exactly the same text is
printed as does print_exception().
"""
if tb:
list = ['Traceback (most recent call last):\n']
list = list + format_tb(tb, limit)
else:
list = []
list = list + format_exception_only(etype, value)
return list
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubclass(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "%s", line %d\n' % (filename, lineno))
if badline is not None:
lines.append(' %s\n' % badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')[:offset].lstrip()
# non-space whitespace (like tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(' %s^\n' % ''.join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return str(value)
except Exception:
pass
try:
value = unicode(value)
return value.encode("ascii", "backslashreplace")
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
def print_exc(limit=None, file=None):
"""Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.
(In fact, it uses sys.exc_info() to retrieve the same information
in a thread-safe way.)"""
if file is None:
file = sys.stderr
try:
etype, value, tb = sys.exc_info()
print_exception(etype, value, tb, limit, file)
finally:
etype = value = tb = None
def format_exc(limit=None):
"""Like print_exc() but return a string."""
try:
etype, value, tb = sys.exc_info()
return ''.join(format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
def print_last(limit=None, file=None):
"""This is a shorthand for 'print_exception(sys.last_type,
sys.last_value, sys.last_traceback, limit, file)'."""
if not hasattr(sys, "last_type"):
raise ValueError("no last exception")
if file is None:
file = sys.stderr
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file)
def print_stack(f=None, limit=None, file=None):
"""Print a stack trace from its invocation point.
The optional 'f' argument can be used to specify an alternate
stack frame at which to start. The optional 'limit' and 'file'
arguments have the same meaning as for print_exception().
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
print_list(extract_stack(f, limit), file)
def format_stack(f=None, limit=None):
"""Shorthand for 'format_list(extract_stack(f, limit))'."""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
return format_list(extract_stack(f, limit))
def extract_stack(f=None, limit = None):
"""Extract the raw traceback from the current stack frame.
The return value has the same format as for extract_tb(). The
optional 'f' and 'limit' arguments have the same meaning as for
print_stack(). Each item in the list is a quadruple (filename,
line number, function name, text), and the entries are in order
from oldest to newest stack frame.
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
list = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
f = f.f_back
n = n+1
list.reverse()
return list
def tb_lineno(tb):
"""Calculate correct line number of traceback given in tb.
Obsolete in 2.3.
"""
return tb.tb_lineno
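# Illustrative sketch (added example): format_exception() applied to a
# live exc_info() triple yields the same text print_exc() would write.
def _example_format_exc():
    try:
        1 / 0
    except ZeroDivisionError:
        etype, value, tb = sys.exc_info()
        try:
            return ''.join(format_exception(etype, value, tb))
        finally:
            etype = value = tb = None      # avoid a traceback refcycle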

View File

@ -1,84 +0,0 @@
"""Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.
NoneType = type(None)
TypeType = type
ObjectType = object
IntType = int
LongType = long
FloatType = float
BooleanType = bool
try:
ComplexType = complex
except NameError:
pass
StringType = str
# StringTypes is already outdated. Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)". But
# we keep around for compatibility with Python 2.2.
try:
UnicodeType = unicode
StringTypes = (StringType, UnicodeType)
except NameError:
StringTypes = (StringType,)
BufferType = buffer
TupleType = tuple
ListType = list
DictType = DictionaryType = dict
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.func_code)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m) # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
FileType = file
XRangeType = xrange
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
del tb
SliceType = slice
EllipsisType = type(Ellipsis)
DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.func_code)
MemberDescriptorType = type(FunctionType.func_globals)
del sys, _f, _g, _C, _x # Not for export
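# Illustrative sketch (added example): the usual consumer of this module
# is an isinstance() check against one of the aliases defined above.
def _example_is_function(obj):
    return isinstance(obj, (FunctionType, BuiltinFunctionType))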

View File

@ -1,400 +0,0 @@
"""Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
import types
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def warnpy3k(message, category=None, stacklevel=1):
"""Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option.
"""
if sys.py3kwarning:
if category is None:
category = DeprecationWarning
warn(message, category, stacklevel+1)
def _show_warning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
# Keep a working version around in case the deprecation of the old API is
# triggered.
showwarning = _show_warning
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=0):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, basestring), "message must be a string"
assert isinstance(category, (type, types.ClassType)), \
"category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, basestring), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=0):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
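A usage sketch of the two entry points above, each call pushing one 5-tuple onto the front of the filters list; the module name 'legacy' and message prefix 'old' are made-up examples:

import warnings

# Escalate matching DeprecationWarnings to exceptions...
warnings.filterwarnings('error', message='^old', category=DeprecationWarning)
# ...silence anything warned from a module named 'legacy'...
warnings.filterwarnings('ignore', module='^legacy$')
# ...and report each remaining UserWarning location once.
warnings.simplefilter('default', category=UserWarning)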
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError, msg:
print >>sys.stderr, "Invalid -W option ignored:", msg
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
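This is the parser behind the interpreter's -W flag; for instance, the option string 'error::DeprecationWarning' decomposes into the equivalent direct call sketched here:

import warnings

# The five colon-separated fields of 'error::DeprecationWarning'
# are action, message, category, module, lineno:
warnings.filterwarnings('error', message='', category=DeprecationWarning,
                        module='', lineno=0)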
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
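The sys._getframe(stacklevel) lookup above is what makes the common stacklevel=2 idiom attribute a warning to the caller rather than to the helper that issues it; a sketch with a hypothetical old_helper():

import warnings

warnings.simplefilter('always')  # DeprecationWarning is often hidden by default

def old_helper():
    # stacklevel=2 walks one extra frame up, so the report points at
    # the line that called old_helper(), not at this line.
    warnings.warn('old_helper() is deprecated', DeprecationWarning,
                  stacklevel=2)

old_helper()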
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
# Print message and context
showwarning(message, category, filename, lineno)
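The registry bookkeeping above is visible from outside: under the 'once' action a repeated warning is reported a single time. A sketch:

import warnings

warnings.simplefilter('once')
for _ in range(3):
    warnings.warn('reported a single time')  # later repeats hit the registry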
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument specifies an alternative module to use in place of
the module named 'warnings', imported under that name. This argument is
only useful when testing the warnings module itself.
"""
def __init__(self, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
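A usage sketch of the context manager above in record mode, where the returned list fills with WarningMessage objects and the filter mutation is undone on exit:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')  # rolled back when the block exits
    warnings.warn('something odd', UserWarning)

assert caught[0].category is UserWarning
print(caught[0].message)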
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
_warnings_defaults = False
try:
from _warnings import (filters, default_action, once_registry,
warn, warn_explicit)
defaultaction = default_action
onceregistry = once_registry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
# Don't silence DeprecationWarning if -3 or -Q was used.
if not sys.py3kwarning and not sys.flags.division_warning:
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
del _warnings_defaults
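For reference, the flag consulted above comes straight from the command line (-b or -bb); a tiny sketch to inspect it:

import sys

# 0 with no flag, 1 under -b, 2 under -bb; the block above maps these to
# the 'ignore', 'default', and 'error' actions for BytesWarning.
print(sys.flags.bytes_warning)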

Binary file not shown.

Binary file not shown.