Merge "libc: Update auto-gen scripts"

David 'Digit' Turner 2011-02-06 12:53:23 -08:00 committed by Android (Google) Code Review
commit 62e1f374d1
13 changed files with 432 additions and 237 deletions

View file

@@ -219,7 +219,7 @@ int sendmsg:socketcall:16(int, const struct msghdr *, unsigned int) -
int recvmsg:socketcall:17(int, struct msghdr *, unsigned int) -1,102,-1
# sockets for sh.
int __socketcall:__socketcall(int, unsigned long*) -1,-1,102
int __socketcall:socketcall(int, unsigned long*) -1,-1,102
# scheduler & real-time
int sched_setscheduler(pid_t pid, int policy, const struct sched_param *param) 156

View file

@@ -14,7 +14,7 @@ __socketcall:
/* check return value */
cmp/pz r0
bt __NR___socketcall_end
bt __NR_socketcall_end
/* keep error number */
sts.l pr, @-r15
@@ -23,10 +23,10 @@ __socketcall:
mov r0, r4
lds.l @r15+, pr
__NR___socketcall_end:
__NR_socketcall_end:
rts
nop
.align 2
0: .long __NR___socketcall
0: .long __NR_socketcall
1: .long __set_syscall_errno

View file

@@ -280,7 +280,7 @@
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
#define __NR___socketcall (__NR_SYSCALL_BASE + 102)
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
#define __NR_getcpu (__NR_SYSCALL_BASE + 318)
#define __NR_ioprio_set (__NR_SYSCALL_BASE + 288)
#define __NR_ioprio_get (__NR_SYSCALL_BASE + 289)

View file

@@ -7,12 +7,12 @@ from utils import *
noUpdate = 1
def cleanupFile( path ):
def cleanupFile( path, original_path=kernel_original_path ):
"""reads an original header and perform the cleanup operation on it
this functions returns the destination path and the clean header
as a single string"""
# check the header path
src_path = path
src_path = path
if not os.path.exists(src_path):
if noUpdate:
@@ -26,7 +26,6 @@ def cleanupFile( path ):
sys.stderr.write( "warning: not a file: %s\n" % path )
return None, None
original_path = kernel_original_path
if os.path.commonprefix( [ src_path, original_path ] ) != original_path:
if noUpdate:
panic( "file is not in 'original' directory: %s\n" % path );
@@ -54,27 +53,27 @@ def cleanupFile( path ):
else:
dst_path = "common/" + src_path
dst_path = os.path.normpath( original_path + "/../" + dst_path )
dst_path = os.path.normpath( kernel_cleaned_path + "/" + dst_path )
# now, let's parse the file
#
list = cpp.BlockParser().parseFile(path)
if not list:
blocks = cpp.BlockParser().parseFile(path)
if not blocks:
sys.stderr.write( "error: can't parse '%s'" % path )
sys.exit(1)
list.optimizeMacros( kernel_known_macros )
list.optimizeIf01()
list.removeVarsAndFuncs( statics )
list.removeComments()
list.removeEmptyLines()
list.removeMacroDefines( kernel_ignored_macros )
list.insertDisclaimer( kernel.kernel_disclaimer )
list.replaceTokens( kernel_token_replacements )
blocks.optimizeMacros( kernel_known_macros )
blocks.optimizeIf01()
blocks.removeVarsAndFuncs( statics )
blocks.replaceTokens( kernel_token_replacements )
blocks.removeComments()
blocks.removeMacroDefines( kernel_ignored_macros )
blocks.removeWhiteSpace()
out = StringOutput()
list.write(out)
out.write( kernel_disclaimer )
blocks.writeWithWarning(out, kernel_warning, 4)
return dst_path, out.get()
@@ -92,12 +91,15 @@ if __name__ == "__main__":
if the content has changed. with this, you can pass more
than one file on the command-line
-k<path> specify path of original kernel headers
-d<path> specify path of cleaned kernel headers
<header_path> must be in a subdirectory of 'original'
""" % os.path.basename(sys.argv[0])
sys.exit(1)
try:
optlist, args = getopt.getopt( sys.argv[1:], 'uvk:' )
optlist, args = getopt.getopt( sys.argv[1:], 'uvk:d:' )
except:
# unrecognized option
sys.stderr.write( "error: unrecognized option\n" )
@@ -111,6 +113,8 @@ if __name__ == "__main__":
D_setlevel(1)
elif opt == '-k':
kernel_original_path = arg
elif opt == '-d':
kernel_cleaned_path = arg
if len(args) == 0:
usage()
@@ -143,9 +147,6 @@ if __name__ == "__main__":
print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )
if os.environ.has_key("ANDROID_PRODUCT_OUT"):
b.updateP4Files()
else:
b.updateFiles()
b.updateGitFiles()
sys.exit(0)
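
For reference, a minimal stand-alone sketch of the option handling added above: '-k' overrides the original kernel headers path and '-d' the destination for cleaned headers. The default paths below are illustrative placeholders, not the script's real defaults (which are computed via find_program_dir()).

import getopt, os, sys

kernel_original_path = os.path.normpath('external/kernel-headers/original')  # assumed default
kernel_cleaned_path  = os.path.normpath('bionic/libc/kernel')                # assumed default

try:
    optlist, args = getopt.getopt(sys.argv[1:], 'uvk:d:')
except getopt.GetoptError:
    sys.stderr.write("error: unrecognized option\n")
    sys.exit(1)

for opt, arg in optlist:
    if opt == '-k':
        kernel_original_path = arg    # where the pristine kernel headers live
    elif opt == '-d':
        kernel_cleaned_path = arg     # where the cleaned headers are written

print("original: %s" % kernel_original_path)
print("cleaned:  %s" % kernel_cleaned_path)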

View file

@@ -1529,7 +1529,7 @@ def test_CppExpr():
class Block:
"""a class used to model a block of input source text. there are two block types:
- direcive blocks: contain the tokens of a single pre-processor directive (e.g. #if)
- directive blocks: contain the tokens of a single pre-processor directive (e.g. #if)
- text blocks, contain the tokens of non-directive blocks
the cpp parser class below will transform an input source file into a list of Block
@@ -1609,6 +1609,91 @@ class Block:
else:
return None
def removeWhiteSpace(self):
# Remove trailing whitespace and empty lines
# All whitespace is also contracted to a single space
if self.directive != None:
return
tokens = []
line = 0 # index of line start
space = -1 # index of first space, or -1
ii = 0
nn = len(self.tokens)
while ii < nn:
tok = self.tokens[ii]
# If we find a space, record its position if this is the first
# one the line start or the previous character. Don't append
# anything to tokens array yet though.
if tok.id == tokSPACE:
if space < 0:
space = ii
ii += 1
continue
# If this is a line space, ignore the spaces we found previously
# on the line, and remove empty lines.
if tok.id == tokLN:
old_line = line
old_space = space
#print "N line=%d space=%d ii=%d" % (line, space, ii)
ii += 1
line = ii
space = -1
if old_space == old_line: # line only contains spaces
#print "-s"
continue
if ii-1 == old_line: # line is empty
#print "-e"
continue
tokens.append(tok)
continue
# Other token, append any space range if any, converting each
# one to a single space character, then append the token.
if space >= 0:
jj = space
space = -1
while jj < ii:
tok2 = self.tokens[jj]
tok2.value = " "
tokens.append(tok2)
jj += 1
tokens.append(tok)
ii += 1
self.tokens = tokens
def writeWithWarning(self,out,warning,left_count,repeat_count):
# removeWhiteSpace() will sometimes create non-directive blocks
# without any tokens. These come from blocks that only contained
# empty lines and spaces. They should not be printed in the final
# output, and should not be counted for this operation.
#
if not self.directive and self.tokens == []:
return left_count
if self.directive:
out.write(str(self) + "\n")
left_count -= 1
if left_count == 0:
out.write(warning)
left_count = repeat_count
else:
for tok in self.tokens:
out.write(str(tok))
if tok.id == tokLN:
left_count -= 1
if left_count == 0:
out.write(warning)
left_count = repeat_count
return left_count
def __repr__(self):
"""generate the representation of a given block"""
if self.directive:
@@ -1651,7 +1736,6 @@ class Block:
return result
class BlockList:
"""a convenience class used to hold and process a list of blocks returned by
the cpp parser"""
@@ -1694,6 +1778,10 @@ class BlockList:
if b.isIf():
b.expr.removePrefixed(prefix,names)
def removeWhiteSpace(self):
for b in self.blocks:
b.removeWhiteSpace()
def optimizeAll(self,macros):
self.optimizeMacros(macros)
self.optimizeIf01()
@@ -1713,72 +1801,17 @@ class BlockList:
def write(self,out):
out.write(str(self))
def writeWithWarning(self,out,warning,repeat_count):
left_count = repeat_count
for b in self.blocks:
left_count = b.writeWithWarning(out,warning,left_count,repeat_count)
def removeComments(self):
for b in self.blocks:
for tok in b.tokens:
if tok.id == tokSPACE:
tok.value = " "
def removeEmptyLines(self):
# state = 1 => previous line was tokLN
# state = 0 => previous line was directive
state = 1
for b in self.blocks:
if b.isDirective():
#print "$$$ directive %s" % str(b)
state = 0
else:
# a tokLN followed by spaces is replaced by a single tokLN
# several successive tokLN are replaced by a single one
#
dst = []
src = b.tokens
n = len(src)
i = 0
#print "$$$ parsing %s" % repr(src)
while i < n:
# find final tokLN
j = i
while j < n and src[j].id != tokLN:
j += 1
if j >= n:
# uhhh
dst += src[i:]
break
if src[i].id == tokSPACE:
k = i+1
while src[k].id == tokSPACE:
k += 1
if k == j: # empty lines with spaces in it
i = j # remove the spaces
if i == j:
# an empty line
if state == 1:
i += 1 # remove it
else:
state = 1
dst.append(src[i])
i += 1
else:
# this line is not empty, remove trailing spaces
k = j
while k > i and src[k-1].id == tokSPACE:
k -= 1
nn = i
while nn < k:
dst.append(src[nn])
nn += 1
dst.append(src[j])
state = 0
i = j+1
b.tokens = dst
def removeVarsAndFuncs(self,knownStatics=set()):
"""remove all extern and static declarations corresponding
to variable and function declarations. we only accept typedefs
@@ -1789,66 +1822,118 @@ class BlockList:
which is useful for optimized byteorder swap functions and
stuff like that.
"""
# state = 1 => typedef/struct encountered
# state = 2 => vars or func declaration encountered, skipping until ";"
# state = 0 => normal (i.e. LN + spaces)
# state = 1 => typedef/struct encountered, ends with ";"
# state = 2 => var declaration encountered, ends with ";"
# state = 3 => func declaration encountered, ends with "}"
state = 0
depth = 0
blocks2 = []
skipTokens = False
for b in self.blocks:
if b.isDirective():
blocks2.append(b)
else:
n = len(b.tokens)
i = 0
first = 0
if state == 2:
if skipTokens:
first = n
else:
first = 0
while i < n:
tok = b.tokens[i]
if state == 0:
bad = 0
if tok.id in [tokLN, tokSPACE]:
pass
elif tok.value in [ 'struct', 'typedef', 'enum', 'union', '__extension__' ]:
state = 1
else:
if tok.value in [ 'static', 'extern', '__KINLINE' ]:
j = i+1
ident = ""
while j < n and not (b.tokens[j].id in [ '(', ';' ]):
if b.tokens[j].id == tokIDENT:
ident = b.tokens[j].value
j += 1
if j < n and ident in knownStatics:
# this is a known static, we're going to keep its
# definition in the final output
state = 1
else:
#print "### skip static '%s'" % ident
pass
if state == 0:
if i > first:
#print "### intermediate from '%s': '%s'" % (tok.value, repr(b.tokens[first:i]))
blocks2.append( Block(b.tokens[first:i]) )
state = 2
first = n
else: # state > 0
if tok.id == '{':
tokid = tok.id
# If we are not looking for the start of a new
# type/var/func, then skip over tokens until
# we find our terminator, managing the depth of
# braces as we go.
if state > 0:
terminator = False
if tokid == '{':
depth += 1
elif tok.id == '}':
elif tokid == '}':
if depth > 0:
depth -= 1
if (depth == 0) and (state == 3):
terminator = True
elif tokid == ';' and depth == 0:
terminator = True
elif depth == 0 and tok.id == ';':
if state == 2:
first = i+1
if terminator:
# we found the terminator
state = 0
if skipTokens:
skipTokens = False
first = i+1
i += 1
i = i+1
continue
# We are looking for the start of a new type/func/var
# ignore whitespace
if tokid in [tokLN, tokSPACE]:
i = i+1
continue
# Is it a new type definition, then start recording it
if tok.value in [ 'struct', 'typedef', 'enum', 'union', '__extension__' ]:
#print "$$$ keep type declr" + repr(b.tokens[i:])
state = 1
i = i+1
continue
# Is it a variable or function definition. If so, first
# try to determine which type it is, and also extract
# its name.
#
# We're going to parse the next tokens of the same block
# until we find a semicolon or a left parenthesis.
#
# The semicolon corresponds to a variable definition,
# the left-parenthesis to a function definition.
#
# We also assume that the var/func name is the last
# identifier before the terminator.
#
j = i+1
ident = ""
while j < n:
tokid = b.tokens[j].id
if tokid == '(': # a function declaration
state = 3
break
elif tokid == ';': # a variable declaration
state = 2
break
if tokid == tokIDENT:
ident = b.tokens[j].value
j += 1
if j >= n:
# This can only happen when the declaration
# does not end on the current block (e.g. with
# a directive mixed inside it.
#
# We will treat it as malformed because
# it's very hard to recover from this case
# without making our parser much more
# complex.
#
#print "### skip unterminated static '%s'" % ident
break
if ident in knownStatics:
#print "### keep var/func '%s': %s" % (ident,repr(b.tokens[i:j]))
pass
else:
# We're going to skip the tokens for this declaration
#print "### skip variable /func'%s': %s" % (ident,repr(b.tokens[i:j]))
if i > first:
blocks2.append( Block(b.tokens[first:i]))
skipTokens = True
first = n
i = i+1
if i > first:
#print "### final '%s'" % repr(b.tokens[first:i])

View file

@@ -16,7 +16,11 @@ kernel_dirs = [ "linux", "asm", "asm-generic", "mtd" ]
# path to the directory containing the original kernel headers
#
kernel_original_path = os.path.normpath( find_program_dir() + '/../original' )
kernel_original_path = os.path.normpath( find_program_dir() + '/../../../../external/kernel-headers/original' )
# path to the default location of the cleaned-up headers
#
kernel_cleaned_path = os.path.normpath( find_program_dir() + '/..' )
# a special value that is used to indicate that a given macro is known to be
# undefined during optimization
@@ -112,6 +116,18 @@ kernel_disclaimer = """\
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
"""
# This is the warning line that will be inserted every N-th line in the output
kernel_warning = """\
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
"""

View file

@@ -3,7 +3,7 @@
# this program is used to find source code that includes linux kernel headers directly
# (e.g. with #include <linux/...> or #include <asm/...>)
#
# then it lists
# then it lists them on the standard output.
import sys, cpp, glob, os, re, getopt, kernel
from utils import *
@@ -12,20 +12,14 @@ from defaults import *
program_dir = find_program_dir()
wanted_archs = kernel_archs
wanted_include = os.path.normpath(program_dir + '/../original')
wanted_config = os.path.normpath(program_dir + '/../original/config')
wanted_config = None
def usage():
print """\
usage: find_headers.py [options] (file|directory|@listfile)+
usage: find_headers.py [options] <kernel-root> (file|directory|@listfile)+
options:
-d <include-dir> specify alternate kernel headers
'include' directory
('%s' by default)
-c <file> specify alternate .config file
('%s' by default)
-c <file> specify .config file (none by default)
-a <archs> used to specify an alternative list
of architectures to support
@@ -37,12 +31,12 @@ def usage():
by a set of source files or directories containing them. the search
is recursive to find *all* required files.
""" % ( wanted_include, wanted_config, string.join(kernel_archs,",") )
""" % ( string.join(kernel_archs,",") )
sys.exit(1)
try:
optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:' )
optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:k:' )
except:
# unrecognized option
print "error: unrecognized option"
@@ -51,8 +45,6 @@ except:
for opt, arg in optlist:
if opt == '-a':
wanted_archs = string.split(arg,',')
elif opt == '-d':
wanted_include = arg
elif opt == '-c':
wanted_config = arg
elif opt == '-v':
@@ -62,10 +54,10 @@ for opt, arg in optlist:
else:
usage()
if len(args) < 1:
if len(args) < 2:
usage()
kernel_root = wanted_include
kernel_root = args[0]
if not os.path.exists(kernel_root):
sys.stderr.write( "error: directory '%s' does not exist\n" % kernel_root )
sys.exit(1)
@@ -74,26 +66,26 @@ if not os.path.isdir(kernel_root):
sys.stderr.write( "error: '%s' is not a directory\n" % kernel_root )
sys.exit(1)
if not os.path.isdir(kernel_root+"/linux"):
sys.stderr.write( "error: '%s' does not have a 'linux' directory\n" % kernel_root )
if not os.path.isdir(kernel_root+"/include/linux"):
sys.stderr.write( "error: '%s' does not have an 'include/linux' directory\n" % kernel_root )
sys.exit(1)
if not os.path.exists(wanted_config):
sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
sys.exit(1)
if wanted_config:
if not os.path.exists(wanted_config):
sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
sys.exit(1)
if not os.path.isfile(wanted_config):
sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
sys.exit(1)
if not os.path.isfile(wanted_config):
sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
sys.exit(1)
# find all architectures in the kernel tree
re_asm_ = re.compile(r"asm-(\w+)")
archs = []
for dir in os.listdir(kernel_root):
m = re_asm_.match(dir)
if m:
if verbose: print ">> found kernel arch '%s'" % m.group(1)
archs.append(m.group(1))
for archdir in os.listdir(kernel_root+"/arch"):
if os.path.exists("%s/arch/%s/include/asm" % (kernel_root, archdir)):
if verbose:
print "Found arch '%s'" % archdir
archs.append(archdir)
# if we're using the 'kernel_headers' directory, there is only asm/
# and no other asm-<arch> directories (arm is assumed, which sucks)
@@ -126,6 +118,7 @@ if wanted_archs != None:
# helper function used to walk the user files
def parse_file(path, parser):
#print "parse %s" % path
parser.parseFile(path)
@@ -136,7 +129,8 @@ def parse_file(path, parser):
# try to read the config file
try:
cparser = kernel.ConfigParser()
cparser.parseFile( wanted_config )
if wanted_config:
cparser.parseFile( wanted_config )
except:
sys.stderr.write( "error: can't parse '%s'" % wanted_config )
sys.exit(1)
@@ -145,7 +139,8 @@ kernel_config = cparser.getDefinitions()
# first, obtain the list of kernel files used by our clients
fparser = kernel.HeaderScanner()
walk_source_files( args, parse_file, fparser, excludes=["kernel_headers"] )
dir_excludes=[".repo","external/kernel-headers","ndk","out","prebuilt","bionic/libc/kernel","development/ndk","external/qemu/distrib"]
walk_source_files( args[1:], parse_file, fparser, excludes=["./"+f for f in dir_excludes] )
headers = fparser.getHeaders()
files = fparser.getFiles()
@@ -170,6 +165,6 @@ if 0: # just for debugging
sys.exit(0)
for h in sorted(headers):
print h
print "%s" % h
sys.exit(0)
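
A minimal sketch of the new architecture discovery shown above: rather than matching asm-<arch> directory names, the script now walks <kernel-root>/arch/ and keeps every entry that provides an include/asm directory (helper name and example path are made up).

import os

def list_kernel_archs(kernel_root):
    # Mirror of the discovery loop in find_headers.py above.
    archs = []
    for archdir in os.listdir(os.path.join(kernel_root, "arch")):
        if os.path.exists(os.path.join(kernel_root, "arch", archdir, "include", "asm")):
            archs.append(archdir)
    return sorted(archs)

# e.g. list_kernel_archs("/usr/src/linux") -> ['arm', 'sh', 'x86', ...]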

View file

@@ -55,8 +55,11 @@ class HeaderScanner:
# <asm-generic/*>
# <mtd/*>
#
re_combined =\
re.compile(r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % string.join(kernel_dirs,"|") )
re_combined_str=\
r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % string.join(kernel_dirs,"|")
re_combined = re.compile(re_combined_str)
# some kernel files choose to include files with relative paths (x86 32/64
# dispatch for instance)
re_rel_dir = re.compile(r'^.*"([\d\w_\+\.\-/]+)".*$')
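
A quick stand-alone demonstration of the re_combined pattern factored out above (kernel_dirs is the list from defaults.py; the include lines are just examples):

import re

kernel_dirs = ["linux", "asm", "asm-generic", "mtd"]
re_combined = re.compile(r"^.*<((%s)/[\d\w_\+\.\-/]*)>.*$" % "|".join(kernel_dirs))

for line in ['#include <linux/capability.h>', '#include <stdio.h>']:
    m = re_combined.match(line)
    print("%-35s -> %s" % (line, m.group(1) if m else "no match"))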

View file

@@ -6,7 +6,7 @@ from utils import *
def usage():
print """\
usage: %(progname)s
usage: %(progname)s [kernel-original-path]
this program is used to update all the auto-generated clean headers
used by the Bionic C library. it assumes the following:
@@ -31,13 +31,19 @@ except:
sys.stderr.write( "error: unrecognized option\n" )
usage()
if len(optlist) > 0 or len(args) > 0:
if len(optlist) > 0 or len(args) > 1:
usage()
progdir = find_program_dir()
original_dir = os.path.normpath( progdir + "/../original" )
if not os.path.isdir( original_dir ):
panic( "required directory does not exists: %s\n" % original_dir )
if len(args) == 1:
original_dir = arg[0]
if not os.path.isdir(original_dir):
panic( "Not a directory: %s" % original_dir )
else:
original_dir = kernel_original_path
if not os.path.isdir(original_dir):
panic( "Missing directory, please specify one through command-line: %s" % original_dir )
# find all source files in 'original'
#
@@ -57,29 +63,36 @@ b.readDir( os.path.normpath( progdir + "/../common" ) )
#print "OLD " + repr(b.old_files)
oldlen = 120
for path in sources:
dst_path, newdata = clean_header.cleanupFile(path)
dst_path, newdata = clean_header.cleanupFile(path, original_dir)
if not dst_path:
continue
b.readFile( dst_path )
r = b.editFile( dst_path, newdata )
if r == 0:
r = "unchanged"
state = "unchanged"
elif r == 1:
r = "edited"
state = "edited"
else:
r = "added"
state = "added"
print "cleaning: %-*s -> %-*s (%s)" % ( 35, path, 35, dst_path, r )
str = "cleaning: %-*s -> %-*s (%s)" % ( 35, "<original>" + path[len(original_dir):], 35, dst_path, state )
if sys.stdout.isatty():
print "%-*s" % (oldlen,str),
if (r == 0):
print "\r",
else:
print "\n",
oldlen = 0
else:
print str
# We don't use Perforce anymore, but just in case, define ANDROID_USE_P4
# in your environment if you think you need it.
usePerforce = os.environ.has_key("ANDROID_USE_P4")
oldlen = len(str)
if usePerforce:
b.updateP4Files()
else:
b.updateFiles()
print "%-*s" % (oldlen,"Done!")
b.updateGitFiles()
sys.exit(0)
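
The terminal handling above keeps "unchanged" entries on a single, constantly overwritten status line. A simplified stand-alone sketch of that idea (function and variable names are illustrative, not the committed code):

import sys

def report(msg, changed, prev_len):
    # On a tty, pad to the previous message length so leftovers are erased,
    # then return the carriage for unchanged entries; changed entries keep
    # their own line. Off a tty, just print normally.
    if sys.stdout.isatty():
        sys.stdout.write("%-*s" % (prev_len, msg))
        sys.stdout.write("\n" if changed else "\r")
        sys.stdout.flush()
        return len(msg)
    sys.stdout.write(msg + "\n")
    return 0

prev_len = 0
for name, changed in [("a.h", False), ("b.h", True), ("c.h", False)]:
    prev_len = report("cleaning: %s" % name, changed, prev_len)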

View file

@@ -231,6 +231,15 @@ def create_file_path(path):
def walk_source_files(paths,callback,args,excludes=[]):
"""recursively walk a list of paths and files, only keeping the source files in directories"""
for path in paths:
if len(path) > 0 and path[0] == '@':
# this is the name of another file, include it and parse it
path = path[1:]
if os.path.exists(path):
for line in open(path):
if len(line) > 0 and line[-1] == '\n':
line = line[:-1]
walk_source_files([line],callback,args,excludes)
continue
if not os.path.isdir(path):
callback(path,args)
else:
@@ -238,7 +247,7 @@ def walk_source_files(paths,callback,args,excludes=[]):
#print "w-- %s (ex: %s)" % (repr((root,dirs)), repr(excludes))
if len(excludes):
for d in dirs[:]:
if d in excludes:
if os.path.join(root,d) in excludes:
dirs.remove(d)
for f in files:
r, ext = os.path.splitext(f)
@@ -395,3 +404,19 @@ class BatchFileUpdater:
D2("P4 DELETES: %s" % files)
o = commands.getoutput( "p4 delete " + files )
D2( o )
def updateGitFiles(self):
adds, deletes, edits = self.getChanges()
if adds:
for dst in sorted(adds):
self._writeFile(dst)
commands.getoutput("git add " + " ".join(adds))
if deletes:
commands.getoutput("git rm " + " ".join(deletes))
if edits:
for dst in sorted(edits):
self._writeFile(dst)
commands.getoutput("git add " + " ".join(edits))

View file

@@ -105,8 +105,29 @@ def find_bionic_root():
else:
return None
def find_original_kernel_headers():
"""try to find the directory containing the original kernel headers"""
bionic_root = find_bionic_root()
if not bionic_root:
D("Could not find Bionic root !!")
return None
path = os.path.normpath(bionic_root + "/../../external/kernel-headers/original")
if not os.path.isdir(path):
D("Could not find %s" % (path))
return None
return path
def find_kernel_headers():
"""try to find the directory containing the kernel headers for this machine"""
# First try to find the original kernel headers.
ret = find_original_kernel_headers()
if ret:
D("found original kernel headers in: %s" % (ret))
return ret
status, version = commands.getstatusoutput( "uname -r" ) # get Linux kernel version
if status != 0:
D("could not execute 'uname -r' command properly")
@@ -116,14 +137,39 @@ def find_kernel_headers():
if len(version) > 5 and version[-5:] == "-xenU":
version = version[:-5]
path = "/usr/src/linux-headers-" + version
D("probing %s for kernel headers" % (path+"/include"))
path = "/usr/src/linux-headers-" + version + "/include"
D("probing %s for kernel headers" % (path))
ret = os.path.isdir( path )
if ret:
D("found kernel headers in: %s" % (path + "/include"))
D("found kernel headers in: %s" % (path))
return path
return None
def find_arch_header(kernel_headers,arch,header):
# First, try in <root>/arch/<arm>/include/<header>
# corresponding to the location in the kernel source tree for
# certain architectures (e.g. arm).
path = "%s/arch/%s/include/asm/%s" % (kernel_headers, arch, header)
D("Probing for %s" % path)
if os.path.exists(path):
return path
# Try <root>/asm-<arch>/include/<header> corresponding to the location
# in the kernel source tree for other architectures (e.g. x86).
path = "%s/include/asm-%s/%s" % (kernel_headers, arch, header)
D("Probing for %s" % path)
if os.path.exists(path):
return path
# Otherwise, look under <root>/asm-<arch>/<header> corresponding
# the original kernel headers directory
path = "%s/asm-%s/%s" % (kernel_headers, arch, header)
D("Probing for %s" % path)
if os.path.exists(path):
return path
return None
# parser for the SYSCALLS.TXT file
#
@@ -212,7 +258,12 @@ class SysCallsTxtParser:
E("invalid syscall number in '%s'" % line)
return
print str(syscall_id) + ':' + str(syscall_id2) + ':' + str(syscall_id3)
global verbose
if verbose >= 2:
if call_id < 0:
print "%s: %d,%d,%d" % (syscall_name, syscall_id, syscall_id2, syscall_id3)
else:
print "%s(%d): %d,%d,%d" % (syscall_name, call_id, syscall_id, syscall_id2, syscall_id3)
t = { "id" : syscall_id,
"id2" : syscall_id2,

View file

@@ -40,8 +40,8 @@ def parse_command_line(args):
if len(args) == 0:
linux_root = find_kernel_headers()
if linux_root == None:
print "could not locate this system kernel headers root directory, please"
print "specify one when calling this program, i.e. 'checksyscalls <headers-directory>'"
print "Could not locate original or system kernel headers root directory."
print "Please specify one when calling this program, i.e. 'checksyscalls <headers-directory>'"
sys.exit(1)
print "using the following kernel headers root: '%s'" % linux_root
else:
@@ -112,62 +112,63 @@ def process_header(header_file,dict):
arm_dict = {}
x86_dict = {}
superh_dict = {}
# remove trailing slash and '/include' from the linux_root, if any
# remove trailing slash from the linux_root, if any
if linux_root[-1] == '/':
linux_root = linux_root[:-1]
if len(linux_root) > 8 and linux_root[-8:] == '/include':
linux_root = linux_root[:-8]
arm_unistd = linux_root + "/include/asm-arm/unistd.h"
if not os.path.exists(arm_unistd):
print "WEIRD: could not locate the ARM unistd.h header file"
print "tried searching in '%s'" % arm_unistd
print "maybe using a different set of kernel headers might help"
arm_unistd = find_arch_header(linux_root, "arm", "unistd.h")
if not arm_unistd:
print "WEIRD: Could not locate the ARM unistd.h kernel header file,"
print "maybe using a different set of kernel headers might help."
sys.exit(1)
# on recent kernels, asm-i386 and asm-x64_64 have been merged into asm-x86
# with two distinct unistd_32.h and unistd_64.h definition files.
# take care of this here
#
x86_unistd = linux_root + "/include/asm-i386/unistd.h"
if not os.path.exists(x86_unistd):
x86_unistd1 = x86_unistd
x86_unistd = linux_root + "/include/asm-x86/unistd_32.h"
if not os.path.exists(x86_unistd):
print "WEIRD: could not locate the i386/x86 unistd.h header file"
print "tried searching in '%s' and '%s'" % (x86_unistd1, x86_unistd)
print "maybe using a different set of kernel headers might help"
x86_unistd = find_arch_header(linux_root, "i386", "unistd.h")
if not x86_unistd:
x86_unistd = find_arch_header(linux_root, "x86", "unistd_32.h")
if not x86_unistd:
print "WEIRD: Could not locate the i386/x86 unistd.h header file,"
print "maybe using a different set of kernel headers might help."
sys.exit(1)
process_header( linux_root+"/include/asm-arm/unistd.h", arm_dict )
superh_unistd = find_arch_header(linux_root, "sh", "unistd_32.h")
if not superh_unistd:
print "WEIRD: Could not locate the SuperH unistd.h kernel header file,"
print "maybe using a different set of kernel headers might help."
sys.exit(1)
process_header( arm_unistd, arm_dict )
process_header( x86_unistd, x86_dict )
process_header( superh_unistd, superh_dict )
# now perform the comparison
errors = 0
for sc in syscalls:
sc_name = sc["name"]
sc_id = sc["id"]
if sc_id >= 0:
if not arm_dict.has_key(sc_name):
print "arm syscall %s not defined !!" % sc_name
errors += 1
elif arm_dict[sc_name] != sc_id:
print "arm syscall %s should be %d instead of %d !!" % (sc_name, arm_dict[sc_name], sc_id)
errors += 1
for sc in syscalls:
sc_name = sc["name"]
sc_id2 = sc["id2"]
if sc_id2 >= 0:
if not x86_dict.has_key(sc_name):
print "x86 syscall %s not defined !!" % sc_name
errors += 1
elif x86_dict[sc_name] != sc_id2:
print "x86 syscall %s should be %d instead of %d !!" % (sc_name, x86_dict[sc_name], sc_id2)
errors += 1
def check_syscalls(archname, idname, arch_dict):
errors = 0
for sc in syscalls:
sc_name = sc["name"]
sc_id = sc[idname]
if sc_id >= 0:
if not arch_dict.has_key(sc_name):
print "%s syscall %s not defined, should be %d !!" % (archname, sc_name, sc_id)
errors += 1
elif not arch_dict.has_key(sc_name):
print "%s syscall %s is not implemented!" % (archname, sc_name)
errors += 1
elif arch_dict[sc_name] != sc_id:
print "%s syscall %s should be %d instead of %d !!" % (archname, sc_name, arch_dict[sc_name], sc_id)
errors += 1
return errors
errors += check_syscalls("arm", "id", arm_dict)
errors += check_syscalls("x86", "id2", x86_dict)
errors += check_syscalls("superh", "id3", superh_dict)
if errors == 0:
print "congratulations, everything's fine !!"

View file

@@ -557,7 +557,7 @@ class State:
for sc in self.syscalls:
if sc.has_key("asm-arm") and 'arm' in all_archs:
fname = "arch-arm/syscalls/%s.S" % sc["func"]
D( ">>> generating "+fname )
D2( ">>> generating "+fname )
fp = create_file( fname )
fp.write(sc["asm-arm"])
fp.close()
@@ -565,7 +565,7 @@
if sc.has_key("asm-thumb") and 'arm' in all_archs:
fname = "arch-arm/syscalls/%s.S" % sc["func"]
D( ">>> generating "+fname )
D2( ">>> generating "+fname )
fp = create_file( fname )
fp.write(sc["asm-thumb"])
fp.close()
@@ -573,7 +573,7 @@
if sc.has_key("asm-x86") and 'x86' in all_archs:
fname = "arch-x86/syscalls/%s.S" % sc["func"]
D( ">>> generating "+fname )
D2( ">>> generating "+fname )
fp = create_file( fname )
fp.write(sc["asm-x86"])
fp.close()
@@ -581,7 +581,7 @@
if sc.has_key("asm-sh"):
fname = "arch-sh/syscalls/%s.S" % sc["func"]
D( ">>> generating "+fname )
D2( ">>> generating "+fname )
fp = create_file( fname )
fp.write(sc["asm-sh"])
fp.close()
@@ -626,7 +626,7 @@ class State:
for stub in self.new_stubs + self.other_files:
if not os.path.exists( bionic_root + stub ):
# new file, P4 add it
# new file, git add it
D( "new file: " + stub)
adds.append( bionic_root + stub )
shutil.copyfile( bionic_temp + stub, bionic_root + stub )
@@ -643,16 +643,21 @@
if adds:
commands.getoutput("p4 add " + " ".join(adds))
commands.getoutput("git add " + " ".join(adds))
if deletes:
commands.getoutput("p4 delete " + " ".join(deletes))
commands.getoutput("git rm " + " ".join(deletes))
if edits:
commands.getoutput("p4 edit " +
" ".join((bionic_root + file) for file in edits))
for file in edits:
shutil.copyfile( bionic_temp + file, bionic_root + file )
commands.getoutput("git add " +
" ".join((bionic_root + file) for file in edits))
D("ready to go !!")
commands.getoutput("git add %s%s" % (bionic_root,"SYSCALLS.TXT"))
if (not adds) and (not deletes) and (not edits):
D("no changes detected!")
else:
D("ready to go!!")
D_setlevel(1)