git-svn-id: svn://svn.icms.temple.edu/lammps-ro/trunk@9179 f3b2605a-c512-4ea7-a41b-209d697bcdaa

This commit is contained in:
sjplimp
2013-01-02 16:39:52 +00:00
parent 299ea01ce0
commit 3aea32f946
10 changed files with 2515 additions and 728 deletions

View File

@ -22,26 +22,59 @@ lammps_data_sections = set(['Atoms',
'Dihedrals By Type',# new. not standard LAMMPS
'Angles By Type']) # new. not standard LAMMPS
def DeleteComments(string,
                   escape='\\',
                   comment_char='#'):
    """
    Return *string* truncated at the first unescaped comment character.

    Arguments:
      string        the text to scan
      escape        character(s) any one of which escapes the character
                    that follows it
      comment_char  the character which begins a comment

    A comment character immediately preceded by an escape character is
    treated as literal text.  If no unescaped comment character occurs,
    the whole string is returned unchanged.
    """
    escaped_state = False
    for i, c in enumerate(string):
        if escaped_state:
            # The previous character was an escape: this character is
            # taken literally and the escape is consumed.  (Previously
            # escaped_state was left set after escaping a non-escape
            # character, so one escape disabled every later comment
            # character in the string; resetting here fixes that.)
            escaped_state = False
        elif c in escape:
            escaped_state = True
        elif c == comment_char:
            return string[0:i]
    return string
def ExtractDataSection(f,
section_header,
section_name,
comment_char = '#',
include_header = False,
include_section_name = False,
return_line_nums = False):
inside_section = False
if section_name in ('header','Header'): #"Header" section includes beginning
inside_section = True
nonblank_encountered = False
nonheader_encountered = False
i = 0
for line_orig in f:
return_this_line = False
line = line_orig.strip()
if line == section_header:
line = DeleteComments(line_orig).strip()
if line in lammps_data_sections:
nonheader_encountered = True
if section_name in ('header', 'Header'):
# The "header" section includes all lines at the beginning of the
# before any other section is encountered.
if nonheader_encountered:
return_this_line = False
else:
return_this_line = True
elif line == section_name:
inside_section = True
nonblank_encountered = False
if include_header:
if include_section_name:
return_this_line = True
# A block of blank lines (which dont immediately follow
# the section header-name) signal the end of a section:
# the section_name) signal the end of a section:
elif len(line) == 0:
if inside_section and include_header:
if inside_section and include_section_name:
return_this_line = True
if nonblank_encountered:
inside_section = False
@ -59,39 +92,6 @@ def ExtractDataSection(f,
i += 1
def FindDataSection(f,
                    section_header,
                    comment_char = '#'):
    """
    Scan the iterable of lines *f* for the block of data lines that
    follows a line whose stripped text equals *section_header*.

    Returns a pair (start, stop) of 0-based line indices delimiting the
    first data line and one-past-the-last data line of that section, or
    (-1, -1) if the section (or any data inside it) was never found.

    Lines beginning with *comment_char* are ignored, and a blank line
    appearing after the data marks the end of the section.
    """
    start = -1
    stop = -1
    in_section = False
    seen_data = False
    lineno = 0
    for raw_line in f:
        stripped = raw_line.strip()
        if stripped == section_header:
            in_section = True
            seen_data = False
        elif not stripped:
            # A blank line terminates the section, but only once some
            # data has already been encountered (blank lines directly
            # after the header are tolerated).
            if seen_data:
                in_section = False
                stop = lineno
                break
        elif stripped[0] != comment_char:
            if in_section:
                if not seen_data:
                    start = lineno   # <- first non-blank data line
                seen_data = True
        lineno += 1
    # If the file ended while still inside the section, close it at EOF.
    if stop == -1 and start != -1:
        stop = lineno
    return (start, stop)
if __name__ == "__main__":
@ -111,7 +111,7 @@ if __name__ == "__main__":
for section_name in sys.argv[1:]:
for line_num in ExtractDataSection(lines,
section_name,
include_header=True,
include_section_name=True,
return_line_nums=True):
line_nums_exclude.add(line_num)
for i in range(0, len(lines)):

View File

@ -36,6 +36,10 @@ from lttree_styles import *
def Intify(s):
    """
    Convert the string *s* to an integer when possible.

    Plain digit strings are converted directly.  Strings of the form
    'id<N>' or 'type<N>' yield the integer N.  Anything else is
    returned unchanged.
    """
    if s.isdigit():
        return int(s)
    for prefix in ('id', 'type'):
        if s.startswith(prefix):
            return int(s[len(prefix):])
    return s
@ -152,10 +156,12 @@ def BelongsToSel(i, sel):
try:
g_program_name = 'lemplify.py'
g_version_str = '0.2'
g_date_str = '2012-4-12'
g_version_str = '0.3'
g_date_str = '2012-12-11'
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')
non_empty_output = False
no_warnings = True
indent = 2
cindent = 0
atomid_selection = []
@ -451,7 +457,6 @@ try:
tokens = line.strip().split()
if (len(tokens) > 0):
if ((tokens[0] == 'atom_style') and
atom_style_undefined):
@ -482,7 +487,10 @@ try:
'impoper_style',
'min_style',
'pair_style',
'special_bonds'])):
'pair_modify',
'special_bonds',
'kspace_style',
'kspace_modify'])):
l_in_init.append((' '*indent)+line.lstrip())
#if (line.strip() == 'LAMMPS Description'):
@ -515,6 +523,9 @@ try:
complained_atom_style_mismatch = True
sys.stderr.write('Warning: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
# this is not a very serious warning.
#no_warnings = False <--no need. commenting out
atomid = Intify(tokens[i_atomid])
atomtype = Intify(tokens[i_atomtype])
@ -687,12 +698,14 @@ try:
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS BONDS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,2):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected are bonded\n'
' to other atoms you didn\'t select.\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
@ -743,6 +756,7 @@ try:
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Dihedrals'):
@ -790,6 +804,7 @@ try:
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Impropers'):
@ -837,6 +852,7 @@ try:
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Bond Coeffs'):
@ -904,7 +920,7 @@ try:
atomtype_i_str = tokens[0]
if '*' in atomtype_i_str:
raise InputError('PROBLEM near or before '+ErrorLeader(infile, lineno)+'\n'
' As of 2012-7, moltemplate discourages use of the "\*\" wildcard\n'
' As of 2012-7, moltemplate forbids use of the "\*\" wildcard\n'
' character in the \"Pair Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
@ -935,7 +951,7 @@ try:
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical bond_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '@bond:type'+tokens[0]
#tokens[1] = '@bond:type'+tokens[1]
l_in_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'angle_coeff'):
@ -943,7 +959,7 @@ try:
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical angle_coeff command:\n'
' \"'+line.strip()+'\"\n')
tokens[1] = '@angle:type'+tokens[1]
#tokens[1] = '@angle:type'+tokens[1]
l_in_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'dihedral_coeff'):
@ -951,14 +967,14 @@ try:
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical dihedral_coeff command:\n'
' \"'+line.strip()+'\"\n')
tokens[1] = '@dihedral:type'+tokens[1]
#tokens[1] = '@dihedral:type'+tokens[1]
l_in_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'improper_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical improper_coeff command:\n'
' \"'+line.strip()+'\"\n')
tokens[1] = '@improper:type'+tokens[1]
#tokens[1] = '@improper:type'+tokens[1]
l_in_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
@ -1188,21 +1204,20 @@ try:
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or
(min_sel_atomtype and min_sel_atomtype <= i)):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or
(max_sel_atomtype and max_sel_atomtype >= i)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
if i_a_final and i_b_final:
if i_a_final == i_b_final:
i_str = '@atom:type'+str(i_a_final)
tokens[1] = i_str
else:
i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
@ -1233,29 +1248,38 @@ try:
j_a_final = None
j_b_final = None
for j in range(j_a, j_b+1):
if ((j in needed_atomtypes) or
(min_sel_atomtype and min_sel_atomtype <= j)):
if ((j in needed_atomtypes) or (min_sel_atomtype <= j)):
j_a_final = j
break
for j in reversed(range(j_a, j_b+1)):
if ((j in needed_atomtypes) or
(max_sel_atomtype and max_sel_atomtype >= j)):
if ((j in needed_atomtypes) or (max_sel_atomtype >= j)):
j_b_final = j
break
if j_a_final and j_b_final:
if j_a_final == j_b_final:
j_str = '@atom:type'+str(j_a_final)
tokens[1] = j_str
else:
j_str = '@{atom:type'+str(j_a_final)+'}*@{atom:type'+str(j_b_final)+'}'
#if j_a_final and j_b_final:
# if j_a_final == j_b_final:
# j_str = '@atom:type'+str(j_a_final)
# tokens[1] = j_str
# else:
# j_str = '@{atom:type'+str(j_a_final)+'}*@{atom:type'+str(j_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del(l_in_pair_coeffs[i_line])
elif (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
del(l_in_pair_coeffs[i_line])
for i in range(i_a_final, i_b_final+1):
for j in range(j_a_final, j_b_final+1):
if j >= i:
tokens[1] = '@atom:type'+str(i)
tokens[2] = '@atom:type'+str(j)
l_in_pair_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
tokens[1] = i_str
tokens[2] = j_str
tokens[1] = '@atom:type'+tokens[1]
tokens[2] = '@atom:type'+tokens[2]
l_in_pair_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
@ -1311,26 +1335,31 @@ try:
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or
(min_sel_atomtype and min_sel_atomtype <= i)):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or
(max_sel_atomtype and max_sel_atomtype >= i)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
if i_a_final and i_b_final:
if i_a_final == i_b_final:
i_str = '@atom:type'+str(i_a_final)
tokens[1] = i_str
else:
i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del(l_in_masses[i_line])
elif ('*' in atomtype_i_str):
del(l_in_masses[i_line])
for i in range(i_a_final, i_b_final+1):
tokens[1] = '@atom:type'+str(i)
l_in_masses.insert(i_line, (' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
tokens[1] = i_str
assert(i_a == i_b)
tokens[1] = '@atom:type'+str(i_a)
l_in_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
@ -1355,7 +1384,7 @@ try:
tokens[0] = '$bond:id'+str(bondid)
tokens[1] = '@bond:type'+str(bondtype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:type'+str(atomid2)
tokens[3] = '$atom:id'+str(atomid2)
needed_bondids.add(bondid)
needed_bondtypes.add(bondtype)
l_data_bonds[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
@ -1391,7 +1420,6 @@ try:
tokens = line.strip().split()
bondtype_str = tokens[1]
if ('*' in bondtype_str):
bondtype_tokens = bondtype_str.split('*')
@ -1413,18 +1441,30 @@ try:
if i_b > max_needed_bondtype:
i_b = max_needed_bondtype
if i_a == i_b:
i_str = '@bond:type'+str(i_a)
tokens[1] = i_str
else:
i_str = '@{bond:type'+str(j_a_final)+'}*@{bond:type'+str(j_b_final)+'}'
#if i_a == i_b:
# i_str = '@bond:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{bond:type'+str(j_a)+'}*@{bond:type'+str(j_b)+'}'
if ((i_a in needed_bondtypes) and
(i_b in needed_bondtypes)):
l_in_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
if ('*' in bondtype_str):
del(l_in_bond_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_bondtypes):
tokens[1] = '@bond:type'+str(i)
l_in_bond_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_bondtypes):
tokens[1] = '@bond:type'+str(i_a)
l_in_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_bond_coeffs[i_line])
@ -1484,7 +1524,6 @@ try:
tokens = line.strip().split()
angletype_str = tokens[1]
if ('*' in angletype_str):
angletype_tokens = angletype_str.split('*')
@ -1506,18 +1545,28 @@ try:
if i_b > max_needed_angletype:
i_b = max_needed_angletype
if i_a == i_b:
i_str = '@angle:type'+str(i_a)
tokens[1] = i_str
else:
i_str = '@{angle:type'+str(j_a_final)+'}*@{angle:type'+str(j_b_final)+'}'
#if i_a == i_b:
# i_str = '@angle:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{angle:type'+str(j_a)+'}*@{angle:type'+str(j_b)+'}'
if ((i_a in needed_angletypes) and
(i_b in needed_angletypes)):
l_in_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
if ('*' in angletype_str):
del(l_in_angle_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_angletypes):
tokens[1] = '@angle:type'+str(i)
l_in_angle_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_angletypes):
tokens[1] = '@angle:type'+str(i_a)
l_in_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_angle_coeffs[i_line])
@ -1579,7 +1628,6 @@ try:
tokens = line.strip().split()
dihedraltype_str = tokens[1]
if ('*' in dihedraltype_str):
dihedraltype_tokens = dihedraltype_str.split('*')
@ -1601,20 +1649,28 @@ try:
if i_b > max_needed_dihedraltype:
i_b = max_needed_dihedraltype
if i_a == i_b:
i_str = '@dihedral:type'+str(i_a)
tokens[1] = i_str
else:
i_str = '@{dihedral:type'+str(j_a_final)+'}*@{dihedral:type'+str(j_b_final)+'}'
#if i_a == i_b:
# i_str = '@dihedral:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{dihedral:type'+str(j_a)+'}*@{dihedral:type'+str(j_b)+'}'
if ((i_a in needed_dihedraltypes) and
(i_b in needed_dihedraltypes)):
l_in_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
if ('*' in dihedraltype_str):
del(l_in_dihedral_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_dihedraltypes):
tokens[1] = '@dihedral:type'+str(i)
l_in_dihedral_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_dihedraltypes):
tokens[1] = '@dihedral:type'+str(i_a)
l_in_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_dihedral_coeffs[i_line])
@ -1676,7 +1732,6 @@ try:
tokens = line.strip().split()
impropertype_str = tokens[1]
if ('*' in impropertype_str):
impropertype_tokens = impropertype_str.split('*')
@ -1698,18 +1753,28 @@ try:
if i_b > max_needed_impropertype:
i_b = max_needed_impropertype
if i_a == i_b:
i_str = '@improper:type'+str(i_a)
tokens[1] = i_str
else:
i_str = '@{improper:type'+str(j_a_final)+'}*@{improper:type'+str(j_b_final)+'}'
#if i_a == i_b:
# i_str = '@improper:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{improper:type'+str(j_a)+'}*@{improper:type'+str(j_b)+'}'
if ((i_a in needed_impropertypes) and
(i_b in needed_impropertypes)):
l_in_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
if ('*' in impropertype_str):
del(l_in_improper_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_impropertypes):
tokens[1] = '@improper:type'+str(i)
l_in_improper_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_impropertypes):
tokens[1] = '@improper:type'+str(i_a)
l_in_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_improper_coeffs[i_line])
@ -1721,6 +1786,7 @@ try:
if not some_pair_coeffs_read:
sys.stderr.write('Warning: No \"pair coeffs\" set.\n'
' (No interactions between non-bonded atoms defined.)\n')
no_warnings = False
#sys.stderr.write('Writing ttree data to standard out.\n'
# ' You can redirect this to a file using:\n'+
@ -1729,36 +1795,98 @@ try:
if mol_name != '':
sys.stdout.write(mol_name + ' {\n')
if len(l_in_init) > 0:
sys.stdout.write('\n### LAMMPS commands for initialization\n'
'### (These can be overridden later.)\n\n')
l_in_init.insert(0, (' '*cindent)+'write_once(\"'+in_init+'\") {\n')
l_in_init.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_init))
if len(l_in_settings) > 0:
sys.stdout.write('\n### LAMMPS commands for settings\n'
'### (These can be overridden later.)\n\n')
l_in_settings.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_settings.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_settings))
non_empty_output = True
if len(l_in_masses) > 0:
l_in_masses.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_masses))
non_empty_output = True
if len(l_in_pair_coeffs) > 0:
l_in_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_pair_coeffs))
non_empty_output = True
if len(l_in_bond_coeffs) > 0:
l_in_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_bond_coeffs))
non_empty_output = True
if len(l_in_angle_coeffs) > 0:
l_in_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_angle_coeffs))
non_empty_output = True
if len(l_in_dihedral_coeffs) > 0:
l_in_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_dihedral_coeffs))
non_empty_output = True
if len(l_in_improper_coeffs) > 0:
l_in_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_improper_coeffs))
non_empty_output = True
if non_empty_output:
sys.stdout.write('\n### DATA sections\n\n')
if len(l_data_masses) > 0:
l_data_masses.insert(0, (' '*cindent)+'write_once(\"'+data_masses+'\") {\n')
l_data_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_masses))
non_empty_output = True
if len(l_data_bond_coeffs) > 0:
l_data_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bond_coeffs+'\") {\n')
l_data_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bond_coeffs))
non_empty_output = True
if len(l_data_angle_coeffs) > 0:
l_data_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angle_coeffs+'\") {\n')
l_data_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angle_coeffs))
non_empty_output = True
if len(l_data_dihedral_coeffs) > 0:
l_data_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_dihedral_coeffs+'\") {\n')
l_data_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedral_coeffs))
non_empty_output = True
if len(l_data_improper_coeffs) > 0:
l_data_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_improper_coeffs+'\") {\n')
l_data_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_improper_coeffs))
non_empty_output = True
if len(l_data_pair_coeffs) > 0:
l_data_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_pair_coeffs+'\") {\n')
l_data_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pair_coeffs))
non_empty_output = True
# class2 force fields:
if len(l_data_bondbond_coeffs) > 0:
@ -1766,41 +1894,49 @@ try:
l_data_bondbond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond_coeffs))
non_empty_output = True
if len(l_data_bondangle_coeffs) > 0:
l_data_bondangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondangle_coeffs+'\") {\n')
l_data_bondangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondangle_coeffs))
non_empty_output = True
if len(l_data_middlebondtorsion_coeffs) > 0:
l_data_middlebondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_middlebondtorsion_coeffs+'\") {\n')
l_data_middlebondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_middlebondtorsion_coeffs))
non_empty_output = True
if len(l_data_endbondtorsion_coeffs) > 0:
l_data_endbondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_endbondtorsion_coeffs+'\") {\n')
l_data_endbondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_endbondtorsion_coeffs))
non_empty_output = True
if len(l_data_angletorsion_coeffs) > 0:
l_data_angletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angletorsion_coeffs+'\") {\n')
l_data_angletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angletorsion_coeffs))
non_empty_output = True
if len(l_data_angleangletorsion_coeffs) > 0:
l_data_angleangletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangletorsion_coeffs+'\") {\n')
l_data_angleangletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangletorsion_coeffs))
non_empty_output = True
if len(l_data_bondbond13_coeffs) > 0:
l_data_bondbond13_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond13_coeffs+'\") {\n')
l_data_bondbond13_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond13_coeffs))
non_empty_output = True
if len(l_data_angleangle_coeffs) > 0:
l_data_angleangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangle_coeffs+'\") {\n')
l_data_angleangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangle_coeffs))
non_empty_output = True
# automatic generation of bonded interactions by type:
if len(l_data_angles_by_type) > 0:
@ -1808,25 +1944,30 @@ try:
l_data_angles_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles_by_type))
non_empty_output = True
if len(l_data_dihedrals_by_type) > 0:
l_data_dihedrals_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_dihedrals_by_type+'\") {\n')
l_data_dihedrals_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals_by_type))
non_empty_output = True
if len(l_data_impropers_by_type) > 0:
l_data_impropers_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_impropers_by_type+'\") {\n')
l_data_impropers_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers_by_type))
non_empty_output = True
if len(l_data_atoms) > 0:
l_data_atoms.insert(0, (' '*cindent)+'write(\"'+data_atoms+'\") {\n')
l_data_atoms.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_atoms))
non_empty_output = True
else:
sys.stderr.write('Warning: missing \"Atoms\" section.\n'
' (Did you include a LAMMPS data file in your argument list?)\n')
no_warnings = False
# non-point-like particles
if len(l_data_ellipsoids) > 0:
@ -1855,66 +1996,39 @@ try:
l_data_bonds.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bonds))
non_empty_output = True
if len(l_data_angles) > 0:
l_data_angles.insert(0, (' '*cindent)+'write(\"'+data_angles+'\") {\n')
l_data_angles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles))
non_empty_output = True
if len(l_data_dihedrals) > 0:
l_data_dihedrals.insert(0, (' '*cindent)+'write(\"'+data_dihedrals+'\") {\n')
l_data_dihedrals.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals))
non_empty_output = True
if len(l_data_impropers) > 0:
l_data_impropers.insert(0, (' '*cindent)+'write(\"'+data_impropers+'\") {\n')
l_data_impropers.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers))
if len(l_in_pair_coeffs) > 0:
l_in_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_pair_coeffs))
if len(l_in_bond_coeffs) > 0:
l_in_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_bond_coeffs))
if len(l_in_angle_coeffs) > 0:
l_in_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_angle_coeffs))
if len(l_in_dihedral_coeffs) > 0:
l_in_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_dihedral_coeffs))
if len(l_in_improper_coeffs) > 0:
l_in_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_improper_coeffs))
if len(l_in_masses) > 0:
l_in_masses.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_masses))
if len(l_in_settings) > 0:
l_in_settings.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_settings.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_settings))
if len(l_in_init) > 0:
l_in_init.insert(0, (' '*cindent)+'write_once(\"'+in_init+'\") {\n')
l_in_init.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_init))
non_empty_output = True
if mol_name != '':
sys.stdout.write('\n} # end of \"'+mol_name+'\" type definition\n')
if non_empty_output and no_warnings:
sys.stderr.write('WARNING: The '+g_program_name+' script has not been rigorously tested.\n'
' Exotic (manybody) pair styles (and other force-field styles\n'
' with unusual syntax) are not understood by '+g_program_name+'\n'
' (although they are supported by moltemplate). Please look over\n'
' the resulting LT file and check for errors. Convert any remaining\n'
' atom, bond, angle, dihedral, or improper id or type numbers to the\n'
' corresponding variables. Feel free to report any bugs you find.\n'
' (-Andrew Jewett 2012-12-11)\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n'+str(err)+'\n')
sys.exit(-1)

View File

@ -39,8 +39,6 @@ except NameError:
basestring = unicode = str
data_atoms = 'Data Atoms' # <-- The name of the file/section storing Atom data.
class LttreeSettings(BasicUISettings):
@ -65,6 +63,8 @@ class LttreeSettings(BasicUISettings):
self.i_atomid=None #<--An integer indicating which column has the atomid
self.i_atomtype=None #<--An integer indicating which column has the atomtype
self.i_molid=None #<--An integer indicating which column has the molid, if applicable
self.infile=None # Name of the outermost file. This is the file
# which was read at the moment parsing begins.
@ -83,9 +83,9 @@ def LttreeParseArgs(argv, settings):
(argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a an atom_style name.\n'
' (Or single quoted string which includes a space-separated\n'
' list of column names.)\n')
raise InputError('Error('+g_program_name+'): The '+argv[i]+' flag should be followed by a LAMMPS\n'
' atom_style name (or single quoted string containing a space-separated\n'
' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
settings.column_names = AtomStyle2ColNames(argv[i+1])
sys.stderr.write('\n \"'+data_atoms+'\" column format:\n')
sys.stderr.write(' '+(' '.join(settings.column_names))+'\n\n')
@ -234,9 +234,6 @@ def LttreeParseArgs(argv, settings):
# '## dipole or ellipsoid (ie. a rotateable vector).##\n'
# '## (More than one triplet can be specified. The ##\n'
# '## number of entries must be divisible by 3.) ##\n'
#'## 5) Include a ##\n'
#'## write(\"Init.txt\"){atom_style ...} ##\n'
#'## statement in your .ttree file. ##\n'
'########################################################\n')
# The default atom_style is "full"
@ -402,9 +399,10 @@ def _ExecCommands(command_list,
index += 1
# For debugging only
#if ((not isinstance(command, StackableCommand)) and
# (not isinstance(command, ScopeCommand))):
sys.stderr.write(str(command)+'\n')
if ((not isinstance(command, StackableCommand)) and
(not isinstance(command, ScopeCommand)) and
(not isinstance(command, WriteFileCommand))):
sys.stderr.write(str(command)+'\n')
if isinstance(command, PopCommand):
@ -497,11 +495,6 @@ def _ExecCommands(command_list,
elif isinstance(command, WriteFileCommand):
if ((len(command.tmpl_list) > 1) and
(isinstance(command.tmpl_list[1], VarRef)) and
(command.tmpl_list[1].binding.full_name == '$/atom:polymer/monomers[6]/CA')):
pass
# --- Throw away lines containin references to deleted variables:---
# First: To edit the content of a template,
@ -510,8 +503,7 @@ def _ExecCommands(command_list,
for entry in command.tmpl_list:
if isinstance(entry, TextBlock):
tmpl_list.append(TextBlock(entry.text,
entry.locBeg,
entry.locEnd))
entry.srcloc)) #, entry.srcloc_end))
else:
tmpl_list.append(entry)
@ -531,10 +523,10 @@ def _ExecCommands(command_list,
# This requires us to re-parse the contents of this text
# (after it has been rendered), and apply these transformations
# before passing them on to the caller.
if command.file_name == data_atoms:
if command.filename == data_atoms:
text = TransformAtomText(text, matrix_stack.M)
files_content[command.file_name].append(text)
files_content[command.filename].append(text)
elif isinstance(command, ScopeBegin):
@ -559,14 +551,14 @@ def _ExecCommands(command_list,
substitute_vars)
elif isinstance(command, ScopeEnd):
if 'Data Atoms' in files_content:
if data_atoms in files_content:
for ppcommand in postprocessing_commands:
if 'Data Masses' in files_content:
xcm = CalcCM(files_content['Data Atoms'],
files_content['Data Masses'],
if data_masses in files_content:
xcm = CalcCM(files_content[data_atoms],
files_content[data_masses],
settings)
else:
xcm = CalcCM(files_content['Data Atoms'])
xcm = CalcCM(files_content[data_atoms])
if isinstance(ppcommand, PushRightCommand):
matrix_stack.PushCommandsRight(ppcommand.contents,
ppcommand.srcloc,
@ -577,8 +569,8 @@ def _ExecCommands(command_list,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
files_content['Data Atoms'] = \
TransformAtomText(Files_content['Data Atoms'],
files_content[data_atoms] = \
TransformAtomText(files_content[data_atoms],
matrix_stack.M)
for ppcommand in postprocessing_commands:
@ -600,9 +592,9 @@ def _ExecCommands(command_list,
# After processing the commands in this list,
# merge the templates with the callers template list
for file_name, tmpl_list in files_content.items():
global_files_content[file_name] += \
files_content[file_name]
for filename, tmpl_list in files_content.items():
global_files_content[filename] += \
files_content[filename]
return index
@ -628,17 +620,17 @@ def ExecCommands(commands,
def WriteFiles(files_content, suffix='', write_to_stdout=True):
for file_name, str_list in files_content.items():
if file_name != None:
for filename, str_list in files_content.items():
if filename != None:
out_file = None
if file_name == '':
if filename == '':
if write_to_stdout:
out_file = sys.stdout
else:
out_file = open(file_name+suffix, 'a')
out_file = open(filename+suffix, 'a')
if out_file != None:
out_file.write(''.join(str_list))
if file_name != '':
if filename != '':
out_file.close()
@ -658,8 +650,8 @@ if __name__ == "__main__":
"""
g_program_name = 'lttree.py'
g_date_str = '2012-10-19'
g_version_str = '0.35'
g_date_str = '2012-12-15'
g_version_str = '0.7'
####### Main Code Below: #######
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
@ -691,8 +683,11 @@ if __name__ == "__main__":
g_static_commands,
g_instance_commands)
# Now, carry out the commands
# This involves rendering the templates and post-processing them.
# Interpret the the commands. (These are typically write() or
# write_once() commands, rendering templates into text.
# This step also handles coordinate transformations and delete commands.
# Coordinate transformations can be applied to the rendered text
# as a post-processing step.
sys.stderr.write(' done\nbuilding templates...')
@ -707,6 +702,8 @@ if __name__ == "__main__":
settings,
False)
# Finally: write the rendered text to actual files.
# Erase the files that will be written to:
sys.stderr.write(' done\nwriting templates...')
EraseTemplateFiles(g_static_commands)
@ -723,7 +720,14 @@ if __name__ == "__main__":
ExecCommands(g_instance_commands, files_content, settings, True)
sys.stderr.write(' done\nwriting rendered templates...\n')
WriteFiles(files_content)
sys.stderr.write(' done\n')
# Step 11: Now write the variable bindings/assignments table.
# Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
open('ttree_assignments.txt', 'w').close() # <-- erase previous version.
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
sys.stderr.write(' done\n')
except (ValueError, InputError) as err:

File diff suppressed because it is too large Load Diff

View File

@ -6,28 +6,30 @@
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
G_PROGRAM_NAME="moltemplate.sh"
G_VERSION="1.01"
G_DATE="2012-12-15"
echo "${G_PROGRAM_NAME} v${G_VERSION} ${G_DATE}" >&2
echo "" >&2
# First, determine the directory in which this shell script is located.
# (The python script files should also be located here as well.)
# method 1:
# SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# method 2:
# SOURCE="${BASH_SOURCE[0]}"
# while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
# SCRIPT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# method 3:
SCRIPT_DIR=$(dirname $0)
#if which python3 > /dev/null; then
# PYTHON_COMMAND='python3'
#else
# PYTHON_COMMAND='python'
#fi
#
# COMMENTING OUT: I don't use python3 any more because python3 has a larger
# memory footprint. Just use regular python instead.)
PYTHON_COMMAND='python'
# Check for python:
# I prefer python over python3 because python3 requires
# more memory. Just use regular python when available.
if which python > /dev/null; then
PYTHON_COMMAND='python'
elif which python3 > /dev/null; then
PYTHON_COMMAND='python3'
else
echo "Error: $G_PROGRAM_NAME requires python or python3" >&2
exit 1
fi
IMOLPATH=""
@ -35,11 +37,14 @@ if [ -n "${MOLTEMPLATE_PATH}" ]; then
IMOLPATH="-importpath ${MOLTEMPLATE_PATH}"
fi
# command that invokes lttree.py
LTTREE_COMMAND="$PYTHON_COMMAND ${SCRIPT_DIR}/lttree.py ${IMOLPATH}"
# command that invokes lttree_check.py
LTTREE_CHECK_COMMAND="$PYTHON_COMMAND ${SCRIPT_DIR}/lttree_check.py ${IMOLPATH}"
# command that invokes lttree.py
LTTREE_COMMAND="$PYTHON_COMMAND ${SCRIPT_DIR}/lttree.py ${IMOLPATH}"
# command that invokes lttree_postprocess.py
LTTREE_POSTPROCESS_COMMAND="$PYTHON_COMMAND ${SCRIPT_DIR}/lttree_postprocess.py ${IMOLPATH}"
# command that invokes nbody_by_type.py
NBODY_COMMAND="$PYTHON_COMMAND ${SCRIPT_DIR}/nbody_by_type.py"
@ -331,6 +336,7 @@ while [ "$i" -lt "$ARGC" ]; do
if [ "$A" = "-nocheck" ]; then
# Disable syntax checking by undefining LTTREE_CHECK_COMMAND
unset LTTREE_CHECK_COMMAND
unset LTTREE_POSTPROCESS_COMMAND
elif [ "$A" = "-overlay-bonds" ]; then
# In that case, do not remove duplicate bond interactions
unset REMOVE_DUPLICATE_BONDS
@ -524,7 +530,6 @@ fi
# ---------------- Interactions By Type -----------------
# At the time of writing, bonded-interactions-by-atom-type were not
# understood by LAMMPS. These features require auxilliary python scripts.
@ -690,6 +695,16 @@ if [ -s "$data_impropers_by_type" ]; then
fi
if [ -n "$LTTREE_POSTPROCESS_COMMAND" ]; then
echo "" >&2
if ! eval $LTTREE_POSTPROCESS_COMMAND $TTREE_ARGS; then
exit 1
fi
echo "" >&2
fi
# -------------------------------------------------------
# If present, then remove duplicate bonds, angles, dihedrals, and impropers
# (unless overridden by the user).
@ -869,6 +884,7 @@ if [ -s "$data_boundary" ]; then
if [ -n "$BOXSIZE_XY" ] || [ -n "$BOXSIZE_XZ" ] || [ -n "$BOXSIZE_YZ" ]; then
if [ -n "$BOXSIZE_XY" ] && [ -n "$BOXSIZE_XZ" ] && [ -n "$BOXSIZE_YZ" ]; then
#echo "triclinic_parameters: XY XZ YZ = $BOXSIZE_XY $BOXSIZE_XZ $BOXSIZE_YZ" >&2
TRICLINIC="True"
else
echo "Error: Problem with triclinic format (\"xy xz yz\") in \"$data_boundary\"" >&2
@ -932,11 +948,13 @@ fi
if [ -n $TRICLINIC ]; then
if [ -z "$TRICLINIC" ]; then
echo " $BOXSIZE_MINX $BOXSIZE_MAXX xlo xhi" >> "$OUT_FILE_DATA"
echo " $BOXSIZE_MINY $BOXSIZE_MAXY ylo yhi" >> "$OUT_FILE_DATA"
echo " $BOXSIZE_MINZ $BOXSIZE_MAXZ zlo zhi" >> "$OUT_FILE_DATA"
else
echo "triclinic parameters: XY XZ YZ = $BOXSIZE_XY $BOXSIZE_XZ $BOXSIZE_YZ" >&2
echo "" >&2
# Otherwise, this is a triclinic (non orthoganal) crystal basis.
# LAMMPS represents triclinic symmetry using a different set of parameters
# (lx,ly,lz,xy,xz,yz) than the PDB file format (alpha,beta,gamma).
@ -1334,7 +1352,7 @@ fi
echo "" > input_scripts_so_far.tmp
for file_name in "$OUT_FILE_INPUT_SCRIPT" "$OUT_FILE_SETTINGS" "$OUT_FILE_INIT"; do
for file_name in "$OUT_FILE_INIT" "$OUT_FILE_INPUT_SCRIPT" "$OUT_FILE_SETTINGS"; do
if [ -s "$file_name" ]; then
echo "postprocessing file \"$file_name\"" >&2
postprocess_input_script.py input_scripts_so_far.tmp < "$file_name" > "$file_name".tmp

View File

@ -37,7 +37,7 @@ class GraphError(GenError):
g_str = str(g)
# If the string representation of the graph is too
# large to fit in one screen, truncate it
g_str_lines = split(g_str, '\n')
g_str_lines = g_str.split('\n')
if (len(g_str_lines) > 12):
g_str_lines = g_str_lines[0:12] + [' ...(additional lines not shown)]']
g_str = '\n'.join(g_str_lines)

View File

@ -42,11 +42,19 @@ pair_style_list=[]
swap_occured = False
warn_wildcard = False
i=0
while i < len(lines_orig):
# Read the next logical line
# Any lines ending in '&' should be merged with the next line before breaking
line_orig = ''
while i < len(lines_orig):
line_counter = 1 + i - num_lines_ignore
line_orig += lines_orig[i]
if ((len(line_orig) < 2) or (line_orig[-2:] != '&\n')):
break
i += 1
line = line_orig.replace('&\n','\n').rstrip('\n')
for i in range(0, len(lines_orig)):
line_counter = 1 + i - num_lines_ignore
line_orig = lines_orig[i]
line = line_orig.rstrip('\n')
comment = ''
if '#' in line_orig:
ic = line.find('#')
@ -82,11 +90,11 @@ for i in range(0, len(lines_orig)):
# If swapped atom types eariler, we also need to swap 'i' with 'j'.
#
# If "hbond/dreiding.." pair style is used with "hybrid" or
# "overlay" then tokens[3] is the name of the pair style
# "hybrid/overlay" then tokens[3] is the name of the pair style
# and tokens[5] is either 'i' or 'j'.
if len(pair_style_list) > 0:
if ((pair_style_list[0] == 'hybrid') or
(pair_style_list[0] == 'overlay')):
(pair_style_list[0] == 'hybrid/overlay')):
if ((tokens[5] == 'i') and (tokens[3][0:6]=='hbond/')):
tokens[5] = 'j'
sys.stderr.write(' (and replaced \"i\" with \"j\")\n')
@ -101,7 +109,7 @@ for i in range(0, len(lines_orig)):
tokens[4] = 'i'
sys.stderr.write(' (and replaced \"j\" with \"i\")\n')
sys.stdout.write(' '.join(tokens)+comment+'\n')
sys.stdout.write((' '.join(tokens)+comment).replace('\n','&\n')+'\n')
else:
if ((('*' in tokens[1]) or ('*' in tokens[2]))
@ -114,6 +122,8 @@ for i in range(0, len(lines_orig)):
if i >= num_lines_ignore:
sys.stdout.write(line_orig)
i += 1
if swap_occured:

View File

@ -91,13 +91,13 @@ else:
# We keep track of the program name and version.
# (This is only used for generating error messages.)
#g_file_name = 'ttree.py'
g_file_name = __file__.split('/')[-1]
g_module_name = g_file_name
if g_file_name.rfind('.py') != -1:
g_module_name = g_file_name[:g_file_name.rfind('.py')]
g_date_str = '2012-10-19'
g_version_str = '0.47'
#g_filename = 'ttree.py'
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2012-12-15'
g_version_str = '0.73'
@ -133,7 +133,7 @@ class ClassReference(object):
srcloc=None,
statobj=None):
self.statobj_str = statobj_str
if srcloc == None:
if srcloc is None:
self.srcloc = OSrcLoc('', -1)
else:
self.srcloc = srcloc
@ -173,7 +173,7 @@ class Command(object):
class WriteFileCommand(Command):
""" WriteFileCommand
file_name This is the name of the file that will be written to
filename This is the name of the file that will be written to
when the command is executed.
tmpl_list This is the contents of what will be written to the file.
Text strings are often simple strings, however more
@ -185,24 +185,24 @@ class WriteFileCommand(Command):
"""
def __init__(self,
file_name = None,
filename = None,
tmpl_list = None,
srcloc = None):
self.file_name = file_name
if tmpl_list == None:
self.filename = filename
if tmpl_list is None:
self.tmpl_list = []
else:
Command.__init__(self, srcloc)
self.tmpl_list = tmpl_list
def __str__(self):
if self.file_name:
return 'WriteFileCommand(\"'+self.file_name+'\")'
if self.filename:
return 'WriteFileCommand(\"'+self.filename+'\")'
else:
return 'WriteFileCommand(NULL)'
def __copy__(self):
tmpl_list = []
CopyTmplList(self.tmpl_list, tmpl_list) #CHECK:IS_MEMORY_WASTED_HERE?
return WriteFileCommand(self.file_name, tmpl_list, self.srcloc)
return WriteFileCommand(self.filename, tmpl_list, self.srcloc)
class InstantiateCommand(Command):
@ -219,7 +219,7 @@ class InstantiateCommand(Command):
srcloc = None):
Command.__init__(self, srcloc)
self.name = name
#if class_ref == None:
#if class_ref is None:
# self.class_ref = ClassReference()
#else:
self.class_ref = class_ref
@ -328,7 +328,7 @@ class PopRightCommand(PopCommand):
srcloc,
context_node=None):
PopCommand.__init__(self, partner, srcloc, context_node)
assert((partner == None) or isinstance(partner, PushRightCommand))
assert((partner is None) or isinstance(partner, PushRightCommand))
def __copy__(self):
return PopRightCommand(self.partner, self.srcloc, self.context_node)
@ -342,7 +342,7 @@ class PopLeftCommand(PopCommand):
srcloc,
context_node=None):
PopCommand.__init__(self, partner, srcloc, context_node)
assert((partner == None) or isinstance(partner, PushLeftCommand))
assert((partner is None) or isinstance(partner, PushLeftCommand))
def __copy__(self):
return PopLeftCommand(self.partner, self.srcloc, self.context_node)
@ -473,8 +473,8 @@ def CopyTmplList(source_tmpl_list, dest_cpy):
# "entry.nptr" should not contain
# any data yet, so we just ignore it.
# I assert this below:
assert((entry.nptr.cat_node == None) and
(entry.nptr.leaf_node == None))
assert((entry.nptr.cat_node is None) and
(entry.nptr.leaf_node is None))
dest_cpy.append(var_ref)
else:
@ -554,7 +554,7 @@ def FindChild(name, node, dbg_loc):
Note: I have not yet specified what kind of nodes FindChild() operates
on. Both StaticObjs and InstanceObjs have self.children and self.parent.
However only StaticObjs have "self.class_parents".
(These are "parents" in the object-oriented sense.)
("class_parents" are "parents" in the object-oriented sense.)
If "node" (2nd argument) happens t be an StaticObj, this means it also
We must search over the children of these class_parents as well.
@ -648,7 +648,7 @@ def FollowPath(path_tokens, starting_node, dbg_loc):
while i < len(path_tokens):
if path_tokens[i] == '..':
if node.parent == None:
if node.parent is None:
return i, node # <-return the index into the token list
# Caller will know that something went awry
# if the return value is not equal to the
@ -671,13 +671,13 @@ def FollowPath(path_tokens, starting_node, dbg_loc):
# are immediate children of this node's parents are searched.)
while node != None:
child = FindChild(search_target, node, dbg_loc)
if child == None:
if child is None:
node = node.parent
else:
node = child
break
if node == None:
if node is None:
# Caller will know that something went awry if the return
# value is not equal to the length of the token list.
return i, node_before_ellipsis
@ -696,7 +696,7 @@ def FollowPath(path_tokens, starting_node, dbg_loc):
# node for one who's name matches path_tokens[i].
child = FindChild(path_tokens[i], node, dbg_loc)
if child == None:
if child is None:
# In that case, return with the node_list incomplete.
# Let the caller check to see if something went wrong.
return i, node # <-return the index into the token list (i)
@ -709,7 +709,7 @@ def FollowPath(path_tokens, starting_node, dbg_loc):
i += 1
if node.IsDeleted():
sys.stderr.write('(debug_msg: encountered deleted node: \"'+node.name+'\")\n')
#sys.stderr.write('(debug_msg: encountered deleted node: \"'+node.name+'\")\n')
break
return len(path_tokens), node
@ -727,7 +727,7 @@ def PtknsToNode(path_tokens, starting_node, dbg_loc):
if i_last_ptkn < len(path_tokens):
#assert(isinstance(last_node,StaticObj)) <--why did I assert this? seems wrong
if (last_node.parent == None) and (path_tokens[i_last_ptkn] == '..'):
if (last_node.parent is None) and (path_tokens[i_last_ptkn] == '..'):
#In that case, we tried to back out beyond the root of the tree.
raise InputError('Error('+g_module_name+'.PtknsToNode()):\n'
' Invalid variable/class name:\n'
@ -839,7 +839,7 @@ def NodeToPtkns(node):
def NodeToStr(node):
ptkns = NodeToPtkns(node)
assert(len(ptkns) > 0)
if node.parent == None:
if node.parent is None:
assert(node.name == '')
return '/'
path_str = ptkns[0]
@ -860,40 +860,46 @@ def CatLeafNodesToTkns(cat_name, cat_node, leaf_node, dbg_loc):
cat_node_ptkns.append(cat_name+':')
# Determine the path of the leaf node (which should inherit from cat)
deleted = False
leaf_node_ptkns = []
if cat_node != leaf_node:
node = leaf_node
while node.parent != None:
if node.IsDeleted():
deleted = True
leaf_node_ptkns.append('DELETED_'+node.name)
break
leaf_node_ptkns.append(node.name)
if node.parent == cat_node:
break
node = node.parent
leaf_node_ptkns.reverse()
# Check that leaf inherits from cat. If not, print error.
if ((node.parent != cat_node) and (node != cat_node)):
err_msg = 'Error('+g_module_name+'.CatLeafNodesToPtkns()):\n'+\
' Invalid variable (category:leaf) pair\n'
if dbg_loc != None:
cat_node_str = NodeToStr(cat_node)
leaf_node_str = NodeToStr(leaf_node)
err_msg += ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'+\
' (\"'+leaf_node.name+'\" is not in the scope of \"'+cat_node_str+'/'+cat_name+':\")\n'+\
' This will happen if you used the \"category\" command to manually\n'+\
' create a category/counter which is not defined globally.\n'+\
'\n'+\
' Note: Using the analogy of a unix style file system, \n'+\
' the problem is that \"'+leaf_node_str+'\"\n'+\
' is not a subdirectory of \"'+cat_node_str+'\".\n'+\
'\n'+\
' Note: This often occurs when \".../\" is used. In that case, you may\n'+\
' be able to avoid this error by referring to your variable explicitly\n'+\
' by using chains of \"../\" tokens in the path instead of \".../\".\n'
#' Make sure that your variable you are using is defined in \n'+\
#' an environment (currently \"'+leaf_node_str+'\")\n'+\
#' which lies WITHIN the environment where the category was defined.\n'+\
#' (currently \"'+cat_node_str+'\").\n'
raise InputError(err_msg)
if not deleted:
# Check that leaf inherits from cat. If not, print error.
if ((node.parent != cat_node) and (node != cat_node)):
err_msg = 'Error('+g_module_name+'.CatLeafNodesToPtkns()):\n'+\
' Invalid variable (category:leaf) pair\n'
if dbg_loc != None:
cat_node_str = NodeToStr(cat_node)
leaf_node_str = NodeToStr(leaf_node)
err_msg += ' located near '+ErrorLeader(dbg_loc.infile, dbg_loc.lineno)+'\n'+\
' (\"'+leaf_node.name+'\" is not in the scope of \"'+cat_node_str+'/'+cat_name+':\")\n'+\
' This will happen if you used the \"category\" command to manually\n'+\
' create a category/counter which is not defined globally.\n'+\
'\n'+\
' Note: Using the analogy of a unix style file system, \n'+\
' the problem is that \"'+leaf_node_str+'\"\n'+\
' is not a subdirectory of \"'+cat_node_str+'\".\n'+\
'\n'+\
' Note: This often occurs when \".../\" is used. In that case, you may\n'+\
' be able to avoid this error by referring to your variable explicitly\n'+\
' by using chains of \"../\" tokens in the path instead of \".../\".\n'
#' Make sure that your variable you are using is defined in \n'+\
#' an environment (currently \"'+leaf_node_str+'\")\n'+\
#' which lies WITHIN the environment where the category was defined.\n'+\
#' (currently \"'+cat_node_str+'\").\n'
raise InputError(err_msg)
else:
err_msg = 'Warning: Strange variable path'
if dbg_loc != None:
@ -983,11 +989,11 @@ def FindCatNode(category_name, current_node, srcloc):
elif node.parent != None:
node = node.parent
else:
# node.parent == None, ... we're done
# node.parent is None, ... we're done
break
if cat_node == None:
assert(node.parent == None)
if cat_node is None:
assert(node.parent is None)
#sys.stderr.write('Warning near ' +
# ErrorLeader(srcloc.infile,
# srcloc.lineno)+'\n'+
@ -1327,7 +1333,7 @@ def DescrToCatLeafNodes(descr_str,
cat_start_node = PtknsToNode(cat_ptkns[:-1], context_node, dbg_loc)
# Later on, we will search upwards until we find an ancestor
# node containing a category matching cat_name. This will
# be taken care of later. (See "if cat_node == None:" below.)
# be taken care of later. (See "if cat_node is None:" below.)
else:
# In this case, the user supplied an explicit path
# for the category node. Find it now.
@ -1339,7 +1345,7 @@ def DescrToCatLeafNodes(descr_str,
# of the category node, which is what we want.
leaf_start_node = cat_node
if cat_node == None:
if cat_node is None:
# Otherwise, the user did not indicate where the category
# node is defined, but only supplied the category name.
# (This is the most common scenario.)
@ -1441,7 +1447,7 @@ def DescrToCatLeafNodes(descr_str,
else:
#In that case, we were unable to find the node referenced by "..."
raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
' Broken path containing ellipsis (...)\n'
' Broken path.\n' # containing ellipsis (...)\n'
' class/variable \"'+search_target+'\" not found in this\n'
' context: \"'
#+var_ref.prefix + var_ref.descr_str + var_ref.suffix+'\"\n'
@ -1526,7 +1532,7 @@ def DescrToCatLeafNodes(descr_str,
# Otherwise, the user made a mistake in the path.
# Figure out which kind of mistake and print an error.
if (last_node.parent == None) and (leaf_ptkns[i_last_ptkn] == '..'):
if (last_node.parent is None) and (leaf_ptkns[i_last_ptkn] == '..'):
#In that case, we tried to back out beyond the root of the tree.
raise InputError('Error('+g_module_name+'.DescrToCatLeafNodes()):\n'
' Broken path in variable:\n'
@ -1678,22 +1684,22 @@ class Category(object):
self.name = name
if bindings == None:
if bindings is None:
self.bindings = OrderedDict()
else:
self.bindings = bindings
if counter == None:
if counter is None:
self.counter = SimpleCounter(1,1)
else:
self.counter = counter
if manual_assignments == None:
if manual_assignments is None:
self.manual_assignments = OrderedDict()
else:
self.manual_assignments = manual_assignments
if reserved_values == None:
if reserved_values is None:
self.reserved_values = OrderedDict()
else:
self.reserved_values = reserved_values
@ -1888,7 +1894,7 @@ class StaticObj(object):
##vb##def AddVarBinding(self, var_binding):
##vb## if self.var_bindings == None:
##vb## if self.var_bindings is None:
##vb## self.var_bindings = [var_binding]
##vb## else:
##vb## self.var_bindings.append(var_binding)
@ -1908,7 +1914,7 @@ class StaticObj(object):
# augment their original class definition, adding new content to an
# existing class. In that case self.srcloc_begin will have already
# been assigned. We don't want to overwrite it in that case.)
if self.srcloc_begin == None: # <-- not defined yet?
if self.srcloc_begin is None: # <-- not defined yet?
self.srcloc_begin = lex.GetSrcLoc()
while True:
@ -1967,48 +1973,7 @@ class StaticObj(object):
# tmpl_filename = tmpl_filename.strip(lex.quotes) )
tmpl_contents = lex.ReadTemplate()
# Format quirks:
#1) Remove any newlines at the beginning of the first text block
# in tmpl_content.(Sometimes they cause ugly extra blank lines)
assert(len(tmpl_contents) > 0)
if isinstance(tmpl_contents[0], TextBlock):
first_token_strip = tmpl_contents[0].text.lstrip(' ')
if ((len(first_token_strip) > 0) and
(first_token_strip[0] in lex.newline)):
tmpl_contents[0].text = first_token_strip[1:]
#2) Remove any trailing '}' characters, and complain if absent.
# The last token
assert(isinstance(tmpl_contents[-1], TextBlock))
assert(tmpl_contents[-1].text in ['}',''])
if tmpl_contents[-1].text == '}':
del tmpl_contents[-1]
else:
tmpl_begin = None
if isinstance(tmpl_contents[0], TextBlock):
tmpl_begin = tmpl_contents[0].locBeg
elif isinstance(tmpl_contents[0], VarRef):
tmpl_begin = tmpl_contents[0].srcloc
else:
assert(False)
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n\n'
' Premature end to template.\n'
'(Missing terminator character, usually a \'}\'.) The\n'
'incomplete template begins near '+ErrorLeader(tmpl_begin.infile, tmpl_begin.lineno)+'\n')
#3) Finally, if there is nothing but whitespace between the
# last newline and the end, then strip that off too.
if isinstance(tmpl_contents[-1], TextBlock):
i = len(tmpl_contents[-1].text)-1
if i >= 0:
while ((i >= 0) and
(tmpl_contents[-1].text[i] in lex.whitespace) and
(tmpl_contents[-1].text[i] not in lex.newline)):
i -= 1
if (tmpl_contents[-1].text[i] in lex.newline):
tmpl_contents[-1].text = tmpl_contents[-1].text[0:i+1]
StaticObj.CleanupReadTemplate(tmpl_contents, lex)
#sys.stdout.write(' Parse() after ReadTemplate, tokens:\n\n')
#print(tmpl_contents)
@ -2234,7 +2199,7 @@ class StaticObj(object):
break
# If found, we refer to it as "child".
# If not, then we create a new StaticObj named "child".
if (child == None) or (child.name != child_name):
if (child is None) or (child.name != child_name):
child = StaticObj(child_name, self)
self.children.append(child)
assert(child in self.children)
@ -2343,13 +2308,16 @@ class StaticObj(object):
' Error in new statement near '+lex.error_leader()+'\n'
' A \'[\' character should be followed by a number and a \']\' character.')
array_size.append(int(number_str))
suffix = lex.GetParenExpr()
if ((suffix == '') or
(suffix == lex.eof)):
suffix = lex.get_token()
if ((suffix == '') or (suffix == lex.eof)):
array_suffixes.append('')
array_srclocs.append(base_srcloc)
break
if suffix[0] == '.':
lex.push_token(suffix[1:])
suffix_func = lex.GetParenExpr()
suffix = '.' + suffix_func
array_suffixes.append(suffix)
array_srclocs.append(lex.GetSrcLoc())
else:
@ -2593,13 +2561,13 @@ class StaticObj(object):
pop_mod_command = ModCommand(pop_command,
instobj_descr_str)
if instobj_descr_str != './':
sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
staticobj.name+'.instance_commands\n')
#sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
# staticobj.name+'.instance_commands\n')
staticobj.instance_commands.append(push_mod_command)
staticobj.instance_commands.append(pop_mod_command)
else:
sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
staticobj.name+'.instance_commands_push\n')
#sys.stderr.write('DEBUG: Adding '+str(push_command)+' to '+
# staticobj.name+'.instance_commands_push\n')
# CONTINUEHERE: should I make these PushRight commands and
# append them in the opposite order?
# If so I also have to worry about the case above.
@ -2626,6 +2594,49 @@ class StaticObj(object):
@staticmethod
def CleanupReadTemplate(tmpl_contents, lex):
#1) Remove any newlines at the beginning of the first text block
# in tmpl_content.(Sometimes they cause ugly extra blank lines)
assert(len(tmpl_contents) > 0)
if isinstance(tmpl_contents[0], TextBlock):
first_token_strip = tmpl_contents[0].text.lstrip(' ')
if ((len(first_token_strip) > 0) and
(first_token_strip[0] in lex.newline)):
tmpl_contents[0].text = first_token_strip[1:]
tmpl_contents[0].srcloc.lineno += 1
#2) Remove any trailing '}' characters, and complain if absent.
# The last token
assert(isinstance(tmpl_contents[-1], TextBlock))
assert(tmpl_contents[-1].text in ['}',''])
if tmpl_contents[-1].text == '}':
del tmpl_contents[-1]
else:
tmpl_begin = None
if isinstance(tmpl_contents[0], TextBlock):
tmpl_begin = tmpl_contents[0].srcloc
elif isinstance(tmpl_contents[0], VarRef):
tmpl_begin = tmpl_contents[0].srcloc
else:
assert(False)
raise InputError('Error('+g_module_name+'.StaticObj.Parse()):\n'
' Error near '+lex.error_leader()+'\n\n'
' Premature end to template.\n'
'(Missing terminator character, usually a \'}\'.) The\n'
'incomplete template begins near '+ErrorLeader(tmpl_begin.infile, tmpl_begin.lineno)+'\n')
#3) Finally, if there is nothing but whitespace between the
# last newline and the end, then strip that off too.
if isinstance(tmpl_contents[-1], TextBlock):
i = len(tmpl_contents[-1].text)-1
if i >= 0:
while ((i >= 0) and
(tmpl_contents[-1].text[i] in lex.whitespace) and
(tmpl_contents[-1].text[i] not in lex.newline)):
i -= 1
if (tmpl_contents[-1].text[i] in lex.newline):
tmpl_contents[-1].text = tmpl_contents[-1].text[0:i+1]
def LookupStaticRefs(self):
@ -3006,7 +3017,7 @@ class InstanceObjBasic(object):
##vb##def AddVarBinding(self, var_binding):
##vb## if self.var_bindings == None:
##vb## if self.var_bindings is None:
##vb## self.var_bindings = [var_binding]
##vb## else:
##vb## self.var_bindings.append(var_binding)
@ -3014,7 +3025,7 @@ class InstanceObjBasic(object):
def Dealloc(self):
pass
##vb##if self.var_bindings == None:
##vb##if self.var_bindings is None:
##vb## return
##vb##N = len(self.var_bindings)-1
##vb##for i in range(0,len(self.var_bindings)):
@ -3036,7 +3047,14 @@ class InstanceObjBasic(object):
# eliminated unnecessary data members to save space.
def IsDeleted(self):
return (self.parent == self)
# Return true if self.parent == self
# for this node (or for any ancestor node).
node = self
while node.parent != None:
if node.parent == node:
return True
node = node.parent
return False
@ -3390,6 +3408,7 @@ class InstanceObj(InstanceObjBasic):
# del parent.children[i]
# else:
# i += 1
self.Dealloc()
InstanceObjBasic.DeleteSelf(self)
@ -3437,8 +3456,8 @@ class InstanceObj(InstanceObjBasic):
for class_parent in statobj.class_parents:
# Avoid the "Diamond of Death" multiple inheritance problem
if class_parent not in class_parents_in_use:
sys.stderr.write(' DEBUG: '+self.name+'.class_parent = '+
class_parent.name+'\n')
#sys.stderr.write(' DEBUG: '+self.name+'.class_parent = '+
# class_parent.name+'\n')
self.BuildInstanceTree(class_parent,
class_parents_in_use)
class_parents_in_use.add(class_parent)
@ -3787,7 +3806,7 @@ def AssignVarOrderByFile(context_node, search_instance_commands=False):
isinstance(context_node, InstanceObjBasic))):
#if ((var_ref.prefix == '@') or
# (not search_instance_commands)):
if ((var_ref.binding.order == None) or
if ((var_ref.binding.order is None) or
(var_ref.binding.order > var_ref.srcloc.order)):
var_ref.binding.order = var_ref.srcloc.order
@ -3812,7 +3831,8 @@ def AssignVarOrderByCommand(command_list, prefix_filter):
for var_ref in tmpl_list:
if isinstance(var_ref, VarRef):
if var_ref.prefix in prefix_filter:
if ((var_ref.binding.order == None) or
count += 1
if ((var_ref.binding.order is None) or
(var_ref.binding.order > count)):
var_ref.binding.order = count
@ -3874,7 +3894,7 @@ def AutoAssignVals(cat_node,
for leaf_node,var_binding in var_bind_iter:
if ((var_binding.value == None) or ignore_prior_values):
if ((var_binding.value is None) or ignore_prior_values):
if var_binding.nptr.leaf_node.name[:9] == '__query__':
# -- THE "COUNT" HACK --
@ -3902,7 +3922,7 @@ def AutoAssignVals(cat_node,
while True:
cat.counter.incr()
value = str(cat.counter.query())
if ((reserved_values == None) or
if ((reserved_values is None) or
((cat, value) not in reserved_values)):
break
@ -4049,8 +4069,8 @@ def MergeWriteCommands(command_list):
file_templates = defaultdict(list)
for command in command_list:
if isinstance(command, WriteFileCommand):
if command.file_name != None:
file_templates[command.file_name] += \
if command.filename != None:
file_templates[command.filename] += \
command.tmpl_list
return file_templates
@ -4062,28 +4082,28 @@ def WriteTemplatesValue(file_templates):
write out the contents of the templates contain inside them).
"""
for file_name, tmpl_list in file_templates.items():
if file_name == '':
for filename, tmpl_list in file_templates.items():
if filename == '':
out_file = sys.stdout
else:
out_file = open(file_name, 'a')
out_file = open(filename, 'a')
out_file.write(Render(tmpl_list, substitute_vars=True))
if file_name != '':
if filename != '':
out_file.close()
# Alternate (old method):
#for command in command_list:
# if isinstance(command, WriteFileCommand):
# if command.file_name != None:
# if command.file_name == '':
# if command.filename != None:
# if command.filename == '':
# out_file = sys.stdout
# else:
# out_file = open(command.file_name, 'a')
# out_file = open(command.filename, 'a')
#
# out_file.write(Render(command.tmpl_list))
#
# if command.file_name != '':
# if command.filename != '':
# out_file.close()
@ -4096,34 +4116,34 @@ def WriteTemplatesVarName(file_templates):
"""
for file_name, tmpl_list in file_templates.items():
if file_name != '':
out_file = open(file_name + '.template', 'a')
for filename, tmpl_list in file_templates.items():
if filename != '':
out_file = open(filename + '.template', 'a')
out_file.write(Render(tmpl_list, substitute_vars=False))
out_file.close()
def EraseTemplateFiles(command_list):
file_names = set([])
filenames = set([])
for command in command_list:
if isinstance(command, WriteFileCommand):
if (command.file_name != None) and (command.file_name != ''):
if command.file_name not in file_names:
file_names.add(command.file_name)
if (command.filename != None) and (command.filename != ''):
if command.filename not in filenames:
filenames.add(command.filename)
# Openning the files (in mode 'w') and closing them again
# erases their contents.
out_file = open(command.file_name, 'w')
out_file = open(command.filename, 'w')
out_file.close()
out_file = open(command.file_name + '.template', 'w')
out_file = open(command.filename + '.template', 'w')
out_file.close()
#def ClearTemplates(file_templates):
# for file_name in file_templates:
# if file_name != '':
# out_file = open(file_name, 'w')
# for filename in file_templates:
# if filename != '':
# out_file = open(filename, 'w')
# out_file.close()
# out_file = open(file_name + '.template', 'w')
# out_file = open(filename + '.template', 'w')
# out_file.close()
@ -4154,13 +4174,21 @@ def WriteVarBindingsFile(node):
# Now omit variables whos names contain "*" or "?"
# (these are actually not variables, but wildcard patterns)
if not HasWildCard(var_binding.full_name):
if len(var_binding.refs) > 0:
usage_example = ' #'+\
ErrorLeader(var_binding.refs[0].srcloc.infile, \
var_binding.refs[0].srcloc.lineno)
else:
usage_example = ''
out.write(SafelyEncodeString(var_binding.full_name) +' '+
SafelyEncodeString(var_binding.value)+'\n')
SafelyEncodeString(var_binding.value)
+usage_example+'\n')
out.close()
for child in node.children:
WriteVarBindingsFile(child)
def CustomizeBindings(bindings,
g_objectdefs,
g_objects):
@ -4209,13 +4237,13 @@ def CustomizeBindings(bindings,
##############################################################
def BasicUIReadBindingsFile(bindings_so_far, file_name):
def BasicUIReadBindingsFile(bindings_so_far, filename):
try:
f = open(file_name, 'r')
f = open(filename, 'r')
except IOError:
sys.stderr.write('Error('+g_file_name+'):\n'' : unable to open file\n'
sys.stderr.write('Error('+g_filename+'):\n'' : unable to open file\n'
'\n'
' \"'+bindings_file_name+'\"\n'
' \"'+bindings_filename+'\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name, then this could\n'
@ -4224,7 +4252,7 @@ def BasicUIReadBindingsFile(bindings_so_far, file_name):
' to set the variable $atom:wat[2]/H1 to 20.)\n')
sys.exit(1)
BasicUIReadBindingsStream(bindings_so_far, f, file_name)
BasicUIReadBindingsStream(bindings_so_far, f, filename)
f.close()
@ -4276,7 +4304,7 @@ def BasicUIReadBindingsStream(bindings_so_far, in_stream, source_name=''):
text_block = tmpllist[i+1]
assert(isinstance(var_ref, VarRef))
if (not isinstance(text_block, TextBlock)):
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' This is not a valid name-value pair:\n'
' \"'+var_ref.prefix+var_ref.descr_str+' '+text_block.text.rstrip()+'\"\n'
' Each variable asignment should contain a variable name (beginning with\n'
@ -4374,14 +4402,14 @@ def BasicUIParseArgs(argv, settings):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-a':
if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' Error in -a \"'+argv[i+1]+' argument.\"\n'
' The -a flag '+bind_err_msg)
if (argv[i+1][0] in '@$'):
#tokens = argv[i+1].strip().split(' ')
tokens = SplitQuotedString(argv[i+1].strip())
if len(tokens) < 2:
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' Error in -a \"'+argv[i+1]+'\" argument.\n'
' '+bind_err_msg_var)
BasicUIReadBindingsText(settings.user_bindings_x,
@ -4394,14 +4422,14 @@ def BasicUIParseArgs(argv, settings):
del(argv[i:i+2])
elif argv[i] == '-b':
if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' Error in -b \"'+argv[i+1]+' argument.\"\n'
' The -b flag '+bind_err_msg)
if (argv[i+1][0] in '@$'):
#tokens = argv[i+1].strip().split(' ')
tokens = SplitQuotedString(argv[i+1].strip())
if len(tokens) < 2:
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' Error in -b \"'+argv[i+1]+'\" argument.\n'
' '+bind_err_msg_var)
BasicUIReadBindingsText(settings.user_bindings,
@ -4427,7 +4455,7 @@ def BasicUIParseArgs(argv, settings):
(argv[i] == '-import-path') or
(argv[i] == '-import_path')):
if ((i+1 >= len(argv)) or (argv[i+1][:1] == '-')):
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' Error in \"'+argv[i]+'\" argument.\"\n'
' The \"'+argv[i]+'\" argument should be followed by the name of\n'
' an environment variable storing a path for including/importing files.\n')
@ -4436,7 +4464,7 @@ def BasicUIParseArgs(argv, settings):
elif ((argv[i][0] == '-') and (__name__ == "__main__")):
#elif (__name__ == "__main__"):
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
'Unrecogized command line argument \"'+argv[i]+'\"\n')
else:
i += 1
@ -4452,14 +4480,14 @@ def BasicUIParseArgs(argv, settings):
# python script which imports this module, so we let them handle it.)
if len(argv) == 1:
raise InputError('Error('+g_file_name+'):\n'
raise InputError('Error('+g_filename+'):\n'
' This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
elif len(argv) == 2:
try:
settings.lex = TemplateLexer(open(argv[1], 'r'), argv[1]) # Parse text from file
except IOError:
sys.stderr.write('Error('+g_file_name+'):\n'
sys.stderr.write('Error('+g_filename+'):\n'
' unable to open file\n'
' \"'+argv[1]+'\"\n'
' for reading.\n')
@ -4469,7 +4497,7 @@ def BasicUIParseArgs(argv, settings):
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
raise InputError('Syntax Error ('+g_file_name+'):\n'
raise InputError('Syntax Error ('+g_filename+'):\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' '+(' '.join(problem_args))+'\n\n'
@ -4527,12 +4555,13 @@ def BasicUI(settings,
# and replace the (static) variable references to pointers
# to nodes in the StaticObj tree:
sys.stderr.write(' done\nlooking up @variables...')
# Here we assign pointers for variables in "write_once(){text}" templates:
AssignVarPtrs(static_tree_root, search_instance_commands=False)
#gc.collect()
# Here we assign pointers for variables in "write(){text}" templates:
AssignVarPtrs(static_tree_root, search_instance_commands=True)
#gc.collect()
sys.stderr.write(' done\nconstructing the tree of class definitions...')
sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')
#gc.collect()
# Step 4: Construct the instance tree (the tree of instantiated
# classes) from the static tree of type definitions.
@ -4551,14 +4580,18 @@ def BasicUI(settings,
AssignVarPtrs(instance_tree_root, search_instance_commands=False)
#sys.stderr.write('done\n garbage collection...')
#gc.collect()
# CONTINUEHERE: EXPLANATION NEEDED
# Step 6: Now carry out all of the "delete" commands (deferred earlier).
# (These were deferred because the instance tree must be complete before any
# references to target nodes (with non-trivial paths) can be understood.)
InvokeAllDeletes(instance_tree_root,
null_list_warning=False,
null_list_error=True)
sys.stderr.write(' done\n')
#sys.stderr.write('instance_v = ' + str(instance_tree_root) + '\n')
#gc.collect()
# Step 6: The commands must be carried out in a specific order.
# Step 7: The commands must be carried out in a specific order.
# (for example, the "write()" and "new" commands).
# Search through the tree, and append commands to a command list.
# Then re-order the list in the order the commands should have
@ -4570,7 +4603,7 @@ def BasicUI(settings,
#sys.stderr.write('static_commands = '+str(static_commands)+'\n')
#sys.stderr.write('instance_commands = '+str(instance_commands)+'\n')
# Step 7: We are about to assign numbers to the variables.
# Step 8: We are about to assign numbers to the variables.
# We need to decide the order in which to assign them.
# By default static variables (@) are assigned in the order
# they appear in the file.
@ -4581,7 +4614,7 @@ def BasicUI(settings,
AssignVarOrderByFile(static_tree_root, search_instance_commands=True)
AssignVarOrderByCommand(instance_commands, '$')
# Step 8: Assign the variables.
# Step 9: Assign the variables.
# (If the user requested any customized variable bindings,
# load those now.)
if len(settings.user_bindings_x) > 0:
@ -4605,13 +4638,7 @@ def BasicUI(settings,
static_tree_root,
instance_tree_root)
# Step 9: Now write the variable bindings/assignments table.
sys.stderr.write(' done\nwriting \"ttree_assignments.txt\" file...')
open('ttree_assignments.txt', 'w').close() # <-- erase previous version.
WriteVarBindingsFile(static_tree_root)
WriteVarBindingsFile(instance_tree_root)
# The task of writing the output files is left up to the caller.
sys.stderr.write(' done\n')
@ -4631,7 +4658,7 @@ if __name__ == "__main__":
"""
####### Main Code Below: #######
g_program_name = g_file_name
g_program_name = g_filename
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
sys.stderr.write('\n(python version '+str(sys.version)+')\n')
@ -4659,10 +4686,11 @@ if __name__ == "__main__":
g_instance_commands)
# Now write the files
# (Finally carry out the "write()" and "write_once()" commands.)
# Optional: Multiple commands to write to the same file can be merged to
# reduce the number of times the file is openned and closed.
sys.stderr.write(' done\nwriting templates...\n')
sys.stderr.write('writing templates...\n')
# Erase the files that will be written to:
EraseTemplateFiles(g_static_commands)
EraseTemplateFiles(g_instance_commands)
@ -4679,6 +4707,12 @@ if __name__ == "__main__":
sys.stderr.write(' done\n')
# Step 11: Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
open('ttree_assignments.txt', 'w').close() # <-- erase previous version.
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
except (ValueError, InputError) as err:
sys.stderr.write('\n\n'+str(err)+'\n')
sys.exit(-1)

View File

@ -22,6 +22,7 @@ import os.path
import sys
from collections import deque
import re, fnmatch
import string
#import gc
@ -51,7 +52,12 @@ __all__ = ["TtreeShlex",
"VarRef",
"VarNPtr",
"VarBinding",
"DeleteLineFromTemplate",
"SplitTemplate",
"SplitTemplateMulti",
"TableFromTemplate",
"ExtractCatName",
#"_TableFromTemplate",
#"_DeleteLineFromTemplate",
"DeleteLinesWithBadVars",
"TemplateLexer"]
@ -97,7 +103,7 @@ class TtreeShlex(object):
self.wordterminators = set([]) #WORDTERMINATORS
self.prev_space_terminator = '' #WORDTERMINATORS
self.whitespace = ' \t\r\n'
self.whitespace = ' \t\r\f\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
@ -233,17 +239,23 @@ class TtreeShlex(object):
return raw
def read_char(self):
if self.pushback: #WORDTERMINATORS
nextchar = self.pushback.popleft() #WORDTERMINATORS
assert((type(nextchar) is str) and (len(nextchar)==1)) #WORDTERMINATORS
else: #WORDTERMINATORS
nextchar = self.instream.read(1) #WORDTERMINATORS
return nextchar
def read_token(self):
self.prev_space_terminator = '' #WORDTERMINATORS
quoted = False
escapedstate = ' '
while True:
#### self.pushback is now a stack of characters, not tokens #WORDTERMINATORS
if self.pushback: #WORDTERMINATORS
nextchar = self.pushback.popleft() #WORDTERMINATORS
assert((type(nextchar) is str) and (len(nextchar)==1)) #WORDTERMINATORS
else: #WORDTERMINATORS
nextchar = self.instream.read(1) #WORDTERMINATORS
nextchar = self.read_char()
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
@ -347,7 +359,7 @@ class TtreeShlex(object):
if self.token or (self.posix and quoted):
# Keep track of which character(s) terminated
# the token (including whitespace and comments).
self.prev_space_terminator = next_char + comment_contents #WORDTERMINATORS
self.prev_space_terminator = nextchar + comment_contents #WORDTERMINATORS
break # emit current token
else:
continue
@ -434,6 +446,8 @@ class TtreeShlex(object):
return self.__bool__()
# The split() function was originally from shlex
# It is included for backwards compatibility.
def split(s, comments=False, posix=True):
lex = TtreeShlex(s, posix=posix)
lex.whitespace_split = True
@ -473,7 +487,7 @@ class InputError(Exception):
def ErrorLeader(infile, lineno):
return '\"'+infile+'\", line '+str(lineno)+': '
return '\"'+infile+'\", line '+str(lineno)
class SrcLoc(object):
@ -490,7 +504,7 @@ class SrcLoc(object):
def SplitQuotedString(string,
quotes='\'\"',
delimiters=' \t\r\n',
delimiters=' \t\r\f\n',
escape='\\',
comment_char='#'):
tokens = []
@ -533,6 +547,8 @@ def SplitQuotedString(string,
c = '\t'
elif (c == 'r') and (escaped_state == True):
c = '\r'
elif (c == 'f') and (escaped_state == True):
c = '\f'
token += c
reading_token = True
escaped_state = False
@ -559,12 +575,12 @@ def EscCharStrToChar(s_in, escape='\\'):
out_lstr.append('\t')
elif (c == 'r'):
out_lstr.append('\r')
elif (c == 'f'):
out_lstr.append('\f')
elif (c == '\''):
out_lstr.append('\'')
elif (c == '\"'):
out_lstr.append('\"')
elif (c == '\r'):
c = '\\r'
elif c in escape:
out_lstr.append(c)
else:
@ -582,7 +598,7 @@ def EscCharStrToChar(s_in, escape='\\'):
def SafelyEncodeString(in_str,
quotes='\'\"',
delimiters=' \t\r\n',
delimiters=' \t\r\f\n',
escape='\\',
comment_char='#'):
"""
@ -605,6 +621,8 @@ def SafelyEncodeString(in_str,
c = '\\t'
elif (c == '\r'):
c = '\\r'
elif (c == '\f'):
c = '\\f'
elif c in quotes:
c = escape[0]+c
elif c in escape:
@ -780,36 +798,28 @@ class LineLex(TtreeShlex):
escaped_state = False
found_space = False
while True:
if self.pushback:
next_char = self.pushback.popleft()
assert((type(next_char) is str) and (len(next_char)==1))
else:
next_char = self.instream.read(1)
#sys.stderr.write('next_char=\"'+next_char+'\"\n')
while next_char == '':
nextchar = self.read_char()
#sys.stderr.write('nextchar=\"'+nextchar+'\"\n')
while nextchar == '':
if not self.filestack:
return self._StripComments(line), '', first_token, found_space
else:
self.pop_source()
if self.pushback:
next_char = self.pushback.popleft()
assert((type(next_char) is str) and (len(next_char)==1))
else:
next_char = self.instream.read(1)
if next_char == '\n':
nextchar = self.read_char()
if nextchar == '\n':
self.lineno += 1
if escaped_state:
escaped_state = False
else:
if next_char in self.escape:
line += next_char
if nextchar in self.escape:
line += nextchar
escaped_state = True
else:
escaped_state = False
if not escaped_state:
if (next_char in self.whitespace):
if (nextchar in self.whitespace):
found_space = True
while first_token in self.source_triggers:
fname = RemoveOuterQuotes(self.get_token())
@ -832,11 +842,11 @@ class LineLex(TtreeShlex):
sys.stderr.write('\nWarning at '+self.error_leader()+':\n'
' duplicate attempt to import file:\n \"'+fname+'\"\n')
line, next_char, first_token, found_space = \
line, nextchar, first_token, found_space = \
self._ReadLine(recur_level+1)
if next_char in self.line_terminators:
if nextchar in self.line_terminators:
line_nrw = line.rstrip(self.whitespace)
#sys.stderr.write('line_nrw=\"'+line_nrw+'\"\n')
if ((len(line_nrw) > 0) and
@ -845,20 +855,20 @@ class LineLex(TtreeShlex):
line = line_nrw[:-1] #delete the line_extend character
# from the end of that line and keep reading...
else:
return self._StripComments(line), next_char, first_token, found_space
return self._StripComments(line), nextchar, first_token, found_space
else:
line += next_char
line += nextchar
if not found_space:
first_token += next_char
first_token += nextchar
def ReadLine(self, recur_level=0):
line, next_char, first_token, found_space = \
line, nextchar, first_token, found_space = \
self._ReadLine(recur_level)
if next_char == self.eof:
if nextchar == self.eof:
self.end_encountered = True
return line + next_char
return line + nextchar
@staticmethod
@ -937,18 +947,18 @@ class OSrcLoc(object):
class TextBlock(object):
"""TextBlock is just a 3-tuple consisting of a string, and two OSrcLocs
"""TextBlock is just a 3-tuple consisting of a string, and an OSrcLoc
to help locate it in the original file from which it was read."""
def __init__(self, text, locBeg, locEnd):
def __init__(self, text, srcloc): #srcloc_end):
self.text = text
if locBeg == None:
self.locBeg = OSrcLoc()
if srcloc == None:
self.srcloc = OSrcLoc()
else:
self.locBeg = locBeg
if locEnd == None:
self.locEnd = OSrcLoc()
else:
self.locEnd = locEnd
self.srcloc = srcloc
#if srcloc_end == None:
# self.srcloc_end = OSrcLoc()
#else:
# self.srcloc_end = srcloc_end
def __repr__(self):
return '\"'+self.text+'\"'
@ -962,7 +972,7 @@ class VarRef(object):
prefix = '', # '$' or '${'
descr_str = '', # <- descriptor string: "cpath/category:lpath"
suffix = '', # '}'
srcloc = None,# location in file where defined
srcloc = None,# location in file where defined
binding = None,# a pointer to a tuple storing the value
nptr = None):# <- see class VarNPtr
@ -1063,10 +1073,35 @@ class VarBinding(object):
return repr((self.full_name, self.value, self.order))
def ExtractCatName(descr_str):
""" When applied to a VarRef's "descr_str" member,
this function will extract the "catname" of it's corresponding
"nptr" member. This can be useful for error reporting.
(I use it to insure that the user is using the correct counter
variable types at various locations in their input files.)
def DeleteLineFromTemplate(tmpl_list,
i_entry, # index into tmpl_list
newline_delimiter='\n'):
"""
ib = descr_str.find(':')
if ib == -1:
ib = len(descr_str)
ia = descr_str.rfind('/')
if ia == -1:
ia = 0
return descr_str[ia:ib]
else:
str_before_colon = descr_str[0:ib]
ia = str_before_colon.rfind('/')
if ia == -1:
return str_before_colon
else:
return str_before_colon[ia+1:]
def _DeleteLineFromTemplate(tmpl_list,
i_entry, # index into tmpl_list
newline_delimiter='\n'):
""" Delete a single line from tmpl_list.
tmpl_list is an alternating list of VarRefs and TextBlocks.
To identify the line, the index corresponding to one of the
@ -1091,7 +1126,9 @@ def DeleteLineFromTemplate(tmpl_list,
break
i_prev_newline -= 1
i_next_newline = i_entry
first_var = True
#i_next_newline = i_entry
i_next_newline = i_prev_newline+1
while i_next_newline < len(tmpl_list):
entry = tmpl_list[i_next_newline]
if isinstance(entry, TextBlock):
@ -1100,6 +1137,12 @@ def DeleteLineFromTemplate(tmpl_list,
# Delete the text before this newline (including the newline)
entry.text = entry.text[i_char_newline+1:]
break
# Invoke DeleteSelf() on the first variables on this line. This will
# insure that it is deleted from the ttree_assignments.txt file.
elif isinstance(entry, VarRef):
if first_var:
entry.nptr.leaf_node.DeleteSelf()
first_var = False
i_next_newline += 1
del tmpl_list[i_prev_newline + 1 : i_next_newline]
@ -1132,9 +1175,9 @@ def DeleteLinesWithBadVars(tmpl_list,
del tmpl_list[:]
return 0
else:
i = DeleteLineFromTemplate(tmpl_list,
i,
newline_delimiter)
i = _DeleteLineFromTemplate(tmpl_list,
i,
newline_delimiter)
else:
i += 1
else:
@ -1143,6 +1186,319 @@ def DeleteLinesWithBadVars(tmpl_list,
def SplitTemplate(ltmpl, delim, delete_blanks = False):
"""
Split a template "ltmpl" into a list of "tokens" (sub-templates)
using a single delimiter string "delim".
INPUT arguments:
"ltmpl" should be an list of TextBlocks and VarRefs.
"delim" should be a simple string (type str)
"delete_blanks" should be a boolean True/False value.
When true, successive occurrences of the delimiter
should not create blank entries in the output list.
OUTPUT:
A list of tokens.
Each "token" is either a TextBlock, a VarRef,
or a (flat, 1-dimensional) list containing more than one of these objects.
The number of "tokens" returned equals the number of times the delimiter
is encountered in any of the TextBlocks in the "ltmpl" argument, plus one.
(... Unless "delete_blanks" is set to True.
Again, in that case, empty entries in this list are deleted.)
"""
assert(type(delim) is str)
if not hasattr(ltmpl, '__len__'):
ltmpl = [ltmpl]
tokens_lltmpl = []
token_ltmpl = []
i = 0
while i < len(ltmpl):
entry = ltmpl[i]
if isinstance(entry, TextBlock):
#if hasattr(entry, 'text'):
prev_src_loc = entry.srcloc
tokens_str = entry.text.split(delim)
lineno = entry.srcloc.lineno
j = 0
while j < len(tokens_str):
token_str = tokens_str[j]
delim_found = False
if (j < len(tokens_str)-1):
delim_found = True
if token_str == '':
if delete_blanks:
if delim == '\n':
lineno += 1
if len(token_ltmpl) > 0:
if len(token_ltmpl) == 1:
tokens_lltmpl.append(token_ltmpl[0])
else:
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
token_ltmpl = []
j += 1
continue
new_src_loc = OSrcLoc(prev_src_loc.infile, lineno)
new_src_loc.order = prev_src_loc.order
for c in token_str:
# Reminder to self: c != delim (so c!='\n' if delim='\n')
# (We keep track of '\n' characters in delimiters above.)
if c == '\n':
lineno +=1
new_src_loc.lineno = lineno
text_block = TextBlock(token_str,
new_src_loc)
prev_src_loc = new_src_loc
if len(token_ltmpl) == 0:
if delim_found:
tokens_lltmpl.append(text_block)
del token_ltmpl
token_ltmpl = []
else:
token_ltmpl.append(text_block)
else:
if delim_found:
if len(token_str) > 0:
token_ltmpl.append(text_block)
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
token_ltmpl = []
else:
assert(not delete_blanks)
if (isinstance(token_ltmpl[-1], VarRef)
and
((j>0)
or
((j == len(tokens_str)-1) and
(i == len(ltmpl)-1))
)):
# In that case, this empty token_str corresponds
# to a delimiter which was located immediately
# after the variable name,
# AND
# -there is more text to follow,
# OR
# -we are at the end of the template.
token_ltmpl.append(text_block)
if len(token_ltmpl) == 1:
tokens_lltmpl.append(token_ltmpl[0])
else:
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
token_ltmpl = []
else:
token_ltmpl.append(text_block)
if (delim_found and (delim == '\n')):
lineno += 1
j += 1
elif isinstance(entry, VarRef):
#elif hasattr(entry, 'descr_str'):
lineno = entry.srcloc.lineno
if ((len(token_ltmpl) == 1) and
isinstance(token_ltmpl[0], TextBlock) and
(len(token_ltmpl[0].text) == 0)):
# special case: if the previous entry was "", then it means
# the delimeter appeared at the end of the previous text block
# leading up to this variable. It separates the variable from
# the previous text block. It is not a text block of length 0.
token_ltmpl[0] = entry
else:
token_ltmpl.append(entry)
elif entry == None:
token_ltmpl.append(entry)
else:
assert(False)
i += 1
# Append left over remains of the last token
if len(token_ltmpl) == 1:
tokens_lltmpl.append(token_ltmpl[0])
elif len(token_ltmpl) > 1:
tokens_lltmpl.append(token_ltmpl)
del token_ltmpl
return tokens_lltmpl
def SplitTemplateMulti(ltmpl, delims, delete_blanks=False):
"""
Split a template "ltmpl" into a list of templates using a
single one or more delimiter strings "delim_list".
If multiple delimiter strings are provided, splitting
begins using the first delimiter string in the list.
Then each token in the resulting list of templates
is split using the next delimiter string
and so on until we run out of delimiter strings.
"ltmpl" should be an list of TextBlocks and VarRefs.
"delims" should be a simple string (type str) or a list of strings
"delete_blanks" is either True or False
If True, then any blank entries in the resulting list of
tokens (sub-templates) will be deleted.
"""
if hasattr(delims, '__len__'): # then it hopefully is a list of strings
delim_list = delims
else:
delim_list = [delims] # then it hopefully is a string
tokens = [ltmpl]
for delim in delim_list:
assert(type(delim) is str)
tokens_il = []
for t in tokens:
sub_tokens = SplitTemplate(t, delim, delete_blanks)
for st in sub_tokens:
if hasattr(st, '__len__'):
if (len(st) > 0) or (not delete_blanks):
tokens_il.append(st)
else:
tokens_il.append(st)
tokens = tokens_il
del tokens_il
return tokens
def _TableFromTemplate(d, ltmpl, delimiters, delete_blanks):
"""
See the docstring for the TableFromTemplate() function for an explanation.
(This _TableFromTemplate() and SplitTemplate() are the workhorse functions
for TableFromTemplate().)
"""
output = SplitTemplateMulti(ltmpl, delimiters[d], delete_blanks[d])
if d > 0:
i = 0
while i < len(output):
output[i] = _TableFromTemplate(d-1,
output[i],
delimiters,
delete_blanks)
# Delete empty LISTS?
if (delete_blanks[d] and
hasattr(output[i], '__len__') and
(len(output[i]) == 0)):
del output[i]
else:
i += 1
return output
def TableFromTemplate(ltmpl, delimiters, delete_blanks=True):
"""
This function can be used to split a template
(a list containing TextBlocks and VarRefs) into a table
into a multidimensional table, with an arbitrary number of dimensions.
Arguments:
ltmpl
An alternating list of TextBlocks and VarRefs containing
the contents of this text template.
delimiters
The user must supply a list or tuple of delimiters: one delimiter for
each dimension in the table, with low-priority delimiters
(such as spaces ' ') appearing first, and higher-priority delimiters
(sich as newlines '\n') appearing later on in the list.
This function will divide the entire "ltmpl" into an n-dimensional
table. Initially the text is split into a list of text using the
highest-priority delimiter. Then each entry in the resulting list is
split into another list according to the next highest-priority delimiter.
This continues until all of the delimiters are used up and an
n-dimensional list-of-lists is remaining.
delete_blanks
The optional "delete_blanks" argument can be used to indicate whether
or not to delete blank entries in the table (which occur as a result
of placing two delimiters next to each other). It should be either
None (default), or it should be an array of booleans matching the
size of the "delimiters" argument. This allows the caller to customize
the merge settings separately for each dimension (for example: to allow
merging of whitespace within a line, without ignoring blank lines).
---- Details: ----
1) Multi-character delimiters ARE allowed (like '\n\n').
2) If a delimiter in the "delimiters" argument is not a string
but is a tuple (or a list) of strings, then the text is split according
to any of the delimiters in that tuple/list (starting from the last entry).
This way, users can use this feature to split text according to multiple
different kinds of whitespace characters (such as ' ' and '\t'), for
example, buy setting delimiters[0] = (' ','\t'). If, additionally,
delete_blanks[0] == True, then this will cause this function to
divide text in without regard to whitespace on a given line (for example).
Detailed example:
table2D = TableFromTmplList(ltmpl,
delimiters = ((' ','\t'), '\n'),
delete_blanks = (True, False))
This divides text in a similar way that the "awk" program does by default,
ie, by ignoring various kinds of whitespace between text fields, but NOT
ignoring blank lines.
3) Any text contained in variable-names is ignored.
"""
# Make a copy of ltmpl
# (The workhorse function "_TableFromTemplate()" makes in-place changes to
# its "ltmpl" argument. I don't want to modify "ltmpl", so I make a copy
# of it before I invoke "_TableFromTemplate()" on it.)
output = [ltmpl[i] for i in range(0, len(ltmpl))]
d = len(delimiters) - 1
output = _TableFromTemplate(d, output, delimiters, delete_blanks)
return output
class TemplateLexer(TtreeShlex):
""" This class extends the standard python lexing module, shlex, adding a
new member function (ReadTemplate()), which can read in a block of raw text,
@ -1161,6 +1517,7 @@ class TemplateLexer(TtreeShlex):
self.var_open_paren = '{' #optional parenthesis surround a variable
self.var_close_paren = '}' #optional parenthesis surround a variable
self.newline = '\n'
self.comment_skip_var = '#'
# Which characters belong in words?
#
@ -1201,7 +1558,7 @@ class TemplateLexer(TtreeShlex):
self.escape + \
self.commenters
# Note:
# self.whitespace = ' \t\r\n'
# self.whitespace = ' \t\r\f\n'
# self.quotes = '\'"'
# self.escape = '\\'
# self.commenters = '#'
@ -1301,6 +1658,7 @@ class TemplateLexer(TtreeShlex):
prev_char_delim=False #True iff we just read a var_delim character like '$'
escaped_state=False #True iff we just read a (non-escaped) esc character '\'
commented_state=False #True iff we are in a region of text where vars should be ignored
var_paren_depth=0 # This is non-zero iff we are inside a
# bracketed variable's name for example: "${var}"
var_terminators = self.whitespace + self.newline + self.var_delim + '{}'
@ -1326,20 +1684,24 @@ class TemplateLexer(TtreeShlex):
terminate_var = False
#delete_prior_escape = False
next_char = self.instream.read(1)
nextchar = self.read_char()
#print(' ReadTemplate() next_char=\''+next_char+'\' at '+self.error_leader()+' esc='+str(escaped_state)+', pvar='+str(prev_char_delim)+', paren='+str(var_paren_depth))
#print(' ReadTemplate() nextchar=\''+nextchar+'\' at '+self.error_leader()+' esc='+str(escaped_state)+', pvar='+str(prev_char_delim)+', paren='+str(var_paren_depth))
# Count newlines:
if next_char in self.newline:
if nextchar in self.newline:
commented_state = False
self.lineno += 1
elif ((nextchar in self.comment_skip_var) and
(not escaped_state)):
commented_state = True
# Check for end-of-file:
if next_char == '':
if nextchar == '':
if escaped_state:
raise InputError('Error: in '+self.error_leader()+'\n\n'
@ -1366,32 +1728,32 @@ class TemplateLexer(TtreeShlex):
elif reading_var:
if next_char in terminators:
if nextchar in terminators:
#sys.stdout.write(' ReadTemplate() readmode found terminator.\n')
if escaped_state:
# In this case, the '\' char was only to prevent terminating
# string prematurely, so delete the '\' character.
#delete_prior_escape = True
if not (next_char in self.var_close_paren):
if not (nextchar in self.var_close_paren):
del var_descr_plist[-1]
var_descr_plist.append(next_char)
var_descr_plist.append(nextchar)
elif not ((var_paren_depth>0) and (next_char in self.var_close_paren)):
elif not ((var_paren_depth>0) and (nextchar in self.var_close_paren)):
terminate_var = True
done_reading = True
if next_char in self.var_open_paren: # eg: next_char == '{'
if nextchar in self.var_open_paren: # eg: nextchar == '{'
#sys.stdout.write(' ReadTemplate() readmode found {.\n')
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '{' as a variable prefix
#delete_prior_escape=True # so delete the '\' character
del var_descr_plist[-1]
var_descr_plist.append(next_char)
var_descr_plist.append(nextchar)
else:
# "${var}" is a valid way to refer to a variable
if prev_char_delim:
var_prefix += next_char
var_prefix += nextchar
var_paren_depth = 1
# "${{var}}" is also a valid way to refer to a variable,
# (although strange), but "$va{r}" is not.
@ -1400,47 +1762,47 @@ class TemplateLexer(TtreeShlex):
elif var_paren_depth > 0:
var_paren_depth += 1
elif next_char in self.var_close_paren:
elif nextchar in self.var_close_paren:
#sys.stdout.write(' ReadTemplate() readmode found }.\n')
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '}' as a variable suffix,
#delete_prior_escape=True #so skip the '\' character
if (next_char not in terminators):
if (nextchar not in terminators):
del var_descr_plist[-1]
var_descr_plist.append(next_char)
var_descr_plist.append(nextchar)
else:
if var_paren_depth > 0:
var_paren_depth -= 1
if var_paren_depth == 0:
var_suffix = next_char
var_suffix = nextchar
terminate_var = True
elif next_char in var_terminators:
#sys.stdout.write(' ReadTemplate() readmode found var_terminator \"'+next_char+'\"\n')
elif nextchar in var_terminators:
#sys.stdout.write(' ReadTemplate() readmode found var_terminator \"'+nextchar+'\"\n')
if (escaped_state or (var_paren_depth>0)):
# In this case, the '\' char was only to prevent
# interpreting next_char as a variable terminator
# interpreting nextchar as a variable terminator
#delete_prior_escape = True # so skip the '\' character
del var_descr_plist[-1]
var_descr_plist.append(next_char)
var_descr_plist.append(nextchar)
else:
terminate_var = True
elif next_char in self.var_delim: # such as '$'
elif nextchar in self.var_delim: # such as '$'
#sys.stdout.write(' ReadTemplate() readmode found var_delim.\n')
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '$' as a new variable name
#delete_prior_escape = True # so skip the '\' character
del var_descr_plist[-1]
var_descr_plist.append(next_char)
var_descr_plist.append(nextchar)
else:
prev_var_delim = True
# Then we are processing a new variable name
terminate_var = True
else:
var_descr_plist.append(next_char)
var_descr_plist.append(nextchar)
prev_char_delim = False
@ -1448,39 +1810,43 @@ class TemplateLexer(TtreeShlex):
# Then we are reading a text_block
if next_char in terminators:
if nextchar in terminators:
if escaped_state:
# In this case, the '\' char was only to prevent terminating
# string prematurely, so delete the '\' character.
#delete_prior_escape = True
del text_block_plist[-1]
text_block_plist.append(next_char)
text_block_plist.append(nextchar)
elif commented_state:
text_block_plist.append(nextchar)
else:
terminate_text = True
done_reading = True
elif next_char in self.var_delim: # such as '$'
elif nextchar in self.var_delim: # such as '$'
if escaped_state:
# In this case, the '\' char was only to prevent
# interpreting '$' as a variable prefix.
#delete_prior_escape=True #so delete the '\' character
del text_block_plist[-1]
text_block_plist.append(next_char)
text_block_plist.append(nextchar)
elif commented_state:
text_block_plist.append(nextchar)
else:
prev_char_delim = True
reading_var = True
var_paren_depth = 0
terminate_text = True
else:
text_block_plist.append(next_char)
text_block_plist.append(nextchar)
#TO DO: use "list_of_chars.join()" instead of '+='
prev_char_delim = False # the previous character was not '$'
# Now deal with "other_esc_chars"
#if escaped_state and (next_char in other_esc_chars):
#if escaped_state and (nextchar in other_esc_chars):
if escaped_state and (next_char in other_esc_chars):
if escaped_state and (nextchar in other_esc_chars):
if reading_var:
#sys.stdout.write(' ReadTemplate: var_descr_str=\''+''.join(var_descr_plist)+'\'\n')
assert(var_descr_plist[-2] in self.escape)
@ -1503,13 +1869,13 @@ class TemplateLexer(TtreeShlex):
tmpl_list.append(''.join(text_block_plist))
else:
tmpl_list.append(TextBlock(''.join(text_block_plist),
OSrcLoc(prev_filename, prev_lineno),
OSrcLoc(self.infile, self.lineno)))
OSrcLoc(prev_filename, prev_lineno)))
#, OSrcLoc(self.infile, self.lineno)))
if not done_reading:
# The character that ended the text block
# was a variable delimiter (like '$'), in which case
# we should put it (next_char) in the variable's prefix.
var_prefix = next_char
# we should put it (nextchar) in the variable's prefix.
var_prefix = nextchar
else:
var_prefix = ''
var_descr_plist = []
@ -1525,10 +1891,10 @@ class TemplateLexer(TtreeShlex):
# Print an error if we terminated in the middle of
# an incomplete variable name:
if prev_char_delim:
raise InputError('Error: in '+self.error_leader()+'\n\n'
raise InputError('Error: near '+self.error_leader()+'\n\n'
'Null variable name.')
if var_paren_depth > 0:
raise InputError('Error: in '+self.error_leader()+'\n\n'
raise InputError('Error: near '+self.error_leader()+'\n\n'
'Incomplete bracketed variable name.')
var_descr_str = ''.join(var_descr_plist)
@ -1575,18 +1941,18 @@ class TemplateLexer(TtreeShlex):
var_prefix = ''
var_descr_plist = []
var_suffix = ''
# Special case: Variable delimeters like '$'
# Special case: Variable delimiters like '$'
# terminate the reading of variables,
# but they also signify that a new
# variable is being read.
if next_char in self.var_delim:
if nextchar in self.var_delim:
# Then we are processing a new variable name
prev_var_delim = True
reading_var = True
var_paren_depth = 0
var_prefix = next_char
var_prefix = nextchar
elif next_char in self.var_close_paren:
elif nextchar in self.var_close_paren:
del text_block_plist
text_block_plist = []
#gc.collect()
@ -1599,7 +1965,7 @@ class TemplateLexer(TtreeShlex):
# caused us to stop reading the previous variable and append
# it to the block of text that comes after.
del text_block_plist
text_block_plist = [next_char]
text_block_plist = [nextchar]
#gc.collect()
prev_var_delim = False
reading_var = False
@ -1609,23 +1975,24 @@ class TemplateLexer(TtreeShlex):
# then the terminal character can be included in the list
# of text_blocks to be returned to the caller.
if done_reading and keep_terminal_char:
#sys.stdout.write('ReadTemplate() appending: \''+next_char+'\'\n')
#sys.stdout.write('ReadTemplate() appending: \''+nextchar+'\'\n')
# Here we create a new text block which contains only the
# terminal character (next_char).
#tmpl_list.append( [next_char,
# terminal character (nextchar).
#tmpl_list.append( [nextchar,
# ((self.infile, self.lineno),
# (self.infile, self.lineno))] )
if simplify_output:
tmpl_list.append(next_char)
tmpl_list.append(nextchar)
else:
tmpl_list.append(TextBlock(next_char,
OSrcLoc(self.infile, self.lineno),
tmpl_list.append(TextBlock(nextchar,
OSrcLoc(self.infile, self.lineno)))
#, OSrcLoc(self.infile, self.lineno)))
if escaped_state:
escaped_state = False
else:
if next_char in self.escape:
if nextchar in self.escape:
escaped_state = True
#print("*** TMPL_LIST0 = ***", tmpl_list)

View File

@ -21,13 +21,13 @@ from ttree import *
import gc
g_file_name = __file__.split('/')[-1]
g_module_name = g_file_name
if g_file_name.rfind('.py') != -1:
g_module_name = g_file_name[:g_file_name.rfind('.py')]
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2012-9-06'
g_version_str = '0.1'
g_program_name = g_file_name
g_program_name = g_filename
#sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
@ -45,18 +45,18 @@ try:
' (This is likely a programmer error.\n'
' This script was not intended to be run by end users.)\n')
bindings_file_name = sys.argv[1]
f = open(bindings_file_name)
bindings_filename = sys.argv[1]
f = open(bindings_filename)
assignments = {}
#BasicUIReadBindingsStream(assignments, f, bindings_file_name)
#BasicUIReadBindingsStream(assignments, f, bindings_filename)
# The line above is robust but it uses far too much memory.
# This for loop below works for most cases.
for line in f:
#tokens = lines.strip().split()
tokens = SplitQuotedString(line.strip()) # like split but handles quotes
if len(tokens) != 2:
if len(tokens) < 2:
continue
assignments[tokens[0]] = tokens[1]
@ -84,9 +84,9 @@ try:
var_name = entry
if var_name not in assignments:
raise(InputError('Error at '+
ErrorLeader(var_ref.src_loc.infile,
var_ref.src_loc.lineno)+
raise(InputError('Error('+g_program_name+')'
#' at '+ErrorLeader(var_ref.src_loc.infile,
# var_ref.src_loc.lineno)+
' unknown variable:\n'
' \"'+var_name+'\"\n'))
else: