#!/usr/bin/env python
#
# This is the API builder, it parses the C sources and builds the
# API formal description in XML.
#
# See Copyright for the status of this software.
#
# daniel@veillard.com
#

from __future__ import print_function

import os
import sys
import glob
import re

quiet = True
warnings = 0
debug = False
debugsym = None

#
# C parser analysis code
#
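# The following tables map the source files this script parses to a short
# description of their content.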
included_files = {
    "libvirt-common.h": "header with general libvirt API definitions",
    "libvirt-domain.h": "header with general libvirt API definitions",
" libvirt-domain-checkpoint.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-domain-snapshot.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-event.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-host.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-interface.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-network.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-nodedev.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-nwfilter.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-secret.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-storage.h " : " header with general libvirt API definitions " ,
2014-10-23 14:28:16 +04:00
" libvirt-stream.h " : " header with general libvirt API definitions " ,
2007-08-14 09:57:07 +04:00
" virterror.h " : " header with error specific API definitions " ,
" libvirt.c " : " Main interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-domain.c " : " Domain interfaces for the libvirt library " ,
" libvirt-domain-checkpoint.c " : " Domain checkpoint interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-domain-snapshot.c " : " Domain snapshot interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-host.c " : " Host interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-interface.c " : " Interface interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-network.c " : " Network interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-nodedev.c " : " Node device interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-nwfilter.c " : " NWFilter interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-secret.c " : " Secret interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-storage.c " : " Storage interfaces for the libvirt library " ,
2014-10-22 19:29:09 +04:00
" libvirt-stream.c " : " Stream interfaces for the libvirt library " ,
2013-01-29 19:24:35 +04:00
" virerror.c " : " implements error handling and reporting code for libvirt " ,
2013-01-21 21:40:28 +04:00
" virevent.c " : " event loop for monitoring file handles " ,
2019-09-17 20:21:14 +03:00
" virtypedparam-public.c " : " virTypedParameters APIs " ,
2005-12-01 20:34:21 +03:00
}
2011-09-09 14:55:21 +04:00
qemu_included_files = {
" libvirt-qemu.h " : " header with QEMU specific API definitions " ,
" libvirt-qemu.c " : " Implementations for the QEMU specific APIs " ,
}

lxc_included_files = {
    "libvirt-lxc.h": "header with LXC specific API definitions",
    "libvirt-lxc.c": "Implementations for the LXC specific APIs",
}

admin_included_files = {
    "libvirt-admin.h": "header with admin specific API definitions",
    "admin/libvirt-admin.c": "Implementations for the admin specific APIs",
}

ignored_words = {
    "G_GNUC_UNUSED": (0, "macro keyword"),
    "G_GNUC_NULL_TERMINATED": (0, "macro keyword"),
    "VIR_DEPRECATED": (0, "macro keyword"),
    "VIR_EXPORT_VAR": (0, "macro keyword"),
    "WINAPI": (0, "Windows keyword"),
    "__declspec": (3, "Windows keyword"),
    "__stdcall": (0, "Windows keyword"),
}
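
# Each ignored word maps to (number of following tokens to also skip, reason);
# "__declspec", for instance, also skips the three tokens of its parenthesised
# argument.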

ignored_functions = {
    "virConnectSupportsFeature": "private function for remote access",
    "virDomainMigrateFinish": "private function for migration",
    "virDomainMigrateFinish2": "private function for migration",
    "virDomainMigratePerform": "private function for migration",
    "virDomainMigratePrepare": "private function for migration",
    "virDomainMigratePrepare2": "private function for migration",
    "virDomainMigratePrepareTunnel": "private function for tunnelled migration",
" virDomainMigrateBegin3 " : " private function for migration " ,
" virDomainMigrateFinish3 " : " private function for migration " ,
" virDomainMigratePerform3 " : " private function for migration " ,
" virDomainMigratePrepare3 " : " private function for migration " ,
" virDomainMigrateConfirm3 " : " private function for migration " ,
" virDomainMigratePrepareTunnel3 " : " private function for tunnelled migration " ,
2009-07-30 18:30:50 +04:00
" DllMain " : " specific function for Win32 " ,
2013-05-03 17:34:10 +04:00
" virTypedParamsValidate " : " internal function in virtypedparam.c " ,
2014-10-24 19:54:29 +04:00
" virTypedParameterValidateSet " : " internal function in virtypedparam.c " ,
2013-01-21 21:41:26 +04:00
" virTypedParameterAssign " : " internal function in virtypedparam.c " ,
" virTypedParameterAssignFromStr " : " internal function in virtypedparam.c " ,
2013-06-06 20:54:48 +04:00
" virTypedParameterToString " : " internal function in virtypedparam.c " ,
2013-05-06 14:04:06 +04:00
" virTypedParamsCheck " : " internal function in virtypedparam.c " ,
2013-06-07 18:34:13 +04:00
" virTypedParamsCopy " : " internal function in virtypedparam.c " ,
2013-05-20 18:59:08 +04:00
" virDomainMigrateBegin3Params " : " private function for migration " ,
" virDomainMigrateFinish3Params " : " private function for migration " ,
" virDomainMigratePerform3Params " : " private function for migration " ,
" virDomainMigratePrepare3Params " : " private function for migration " ,
" virDomainMigrateConfirm3Params " : " private function for migration " ,
" virDomainMigratePrepareTunnel3Params " : " private function for tunnelled migration " ,
2015-07-10 09:44:41 +03:00
" virErrorCopyNew " : " private " ,
2008-11-25 18:48:11 +03:00
}
2011-05-31 12:41:37 +04:00
ignored_macros = {
" _virSchedParameter " : " backward compatibility macro for virTypedParameter " ,
" _virBlkioParameter " : " backward compatibility macro for virTypedParameter " ,
" _virMemoryParameter " : " backward compatibility macro for virTypedParameter " ,
}
2016-04-21 15:31:23 +03:00
# macros that should be completely skipped
hidden_macros = {
2016-04-22 11:08:56 +03:00
" VIR_DEPRECATED " : " internal macro to mark deprecated apis " ,
" VIR_EXPORT_VAR " : " internal macro to mark exported vars " ,
2016-04-21 15:31:23 +03:00
}
2019-09-24 15:55:56 +03:00
2005-12-01 20:34:21 +03:00
def escape(raw):
    raw = raw.replace('&', '&amp;')
    raw = raw.replace('<', '&lt;')
    raw = raw.replace('>', '&gt;')
    raw = raw.replace("'", '&apos;')
    raw = raw.replace('"', '&quot;')
    return raw
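
# For instance, escape('<a & b>') yields '&lt;a &amp; b&gt;', which can be
# embedded safely in the generated XML output.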


def uniq(items):
    return sorted(set(items))


class identifier:
    def __init__(self, name, header=None, module=None, type=None, lineno=0,
                 info=None, extra=None, conditionals=None):
        self.name = name
        self.header = header
        self.module = module
        self.type = type
        self.info = info
        self.extra = extra
        self.lineno = lineno
        self.static = 0
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]
        if self.name == debugsym and not quiet:
            print("=> define %s : %s" % (debugsym, (module, type, info,
                                                    extra, conditionals)))

    def __repr__(self):
        r = "%s %s:" % (self.type, self.name)
        if self.static:
            r = r + " static"
        if self.module is not None:
            r = r + " from %s" % self.module
        if self.info is not None:
            r = r + " " + repr(self.info)
        if self.extra is not None:
            r = r + " " + repr(self.extra)
        if self.conditionals is not None:
            r = r + " " + repr(self.conditionals)
        return r

    def set_header(self, header):
        self.header = header

    def set_module(self, module):
        self.module = module

    def set_type(self, type):
        self.type = type

    def set_info(self, info):
        self.info = info

    def set_extra(self, extra):
        self.extra = extra

    def set_lineno(self, lineno):
        self.lineno = lineno

    def set_static(self, static):
        self.static = static

    def set_conditionals(self, conditionals):
        if conditionals is None or len(conditionals) == 0:
            self.conditionals = None
        else:
            self.conditionals = conditionals[:]

    def get_name(self):
        return self.name

    def get_header(self):
        return self.module

    def get_module(self):
        return self.module

    def get_type(self):
        return self.type

    def get_info(self):
        return self.info

    def get_lineno(self):
        return self.lineno

    def get_extra(self):
        return self.extra

    def get_static(self):
        return self.static

    def get_conditionals(self):
        return self.conditionals

    def update(self, header, module, type=None, info=None, extra=None,
               conditionals=None):
        if self.name == debugsym and not quiet:
            print("=> update %s : %s" % (debugsym, (module, type, info,
                                                    extra, conditionals)))
        if header is not None and self.header is None:
            self.set_header(module)
        if module is not None and (self.module is None or self.header == self.module):
            self.set_module(module)
        if type is not None and self.type is None:
            self.set_type(type)
        if info is not None:
            self.set_info(info)
        if extra is not None:
            self.set_extra(extra)
        if conditionals is not None:
            self.set_conditionals(conditionals)
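

# An index aggregates every identifier found while parsing: functions,
# variables, includes, structs, unions, enums, typedefs, macros and cross
# references, each keyed by name.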
class index:
    def __init__(self, name="noname"):
        self.name = name
        self.identifiers = {}
        self.functions = {}
        self.variables = {}
        self.includes = {}
        self.structs = {}
        self.unions = {}
        self.enums = {}
        self.typedefs = {}
        self.macros = {}
        self.references = {}
        self.info = {}

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print(msg)

    def add_ref(self, name, header, module, static, type, lineno, info=None,
                extra=None, conditionals=None):
        if name[0:2] == '__':
            return None
        d = None
        try:
            d = self.identifiers[name]
            d.update(header, module, type, lineno, info, extra, conditionals)
        except Exception:
            d = identifier(name, header, module, type, lineno, info, extra,
                           conditionals)
            self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)

        if d is not None and name is not None and type is not None:
            self.references[name] = d

        if name == debugsym and not quiet:
            print("New ref: %s" % (d))

        return d

    def add(self, name, header, module, static, type, lineno, info=None,
            extra=None, conditionals=None):
        if name[0:2] == '__':
            return None
        d = None
        try:
            d = self.identifiers[name]
            d.update(header, module, type, lineno, info, extra, conditionals)
        except Exception:
            d = identifier(name, header, module, type, lineno, info, extra,
                           conditionals)
            self.identifiers[name] = d

        if d is not None and static == 1:
            d.set_static(1)
        if d is not None and name is not None and type is not None:
            type_map = {
                "function": self.functions,
                "functype": self.functions,
                "variable": self.variables,
                "include": self.includes,
                "struct": self.structs,
                "union": self.unions,
                "enum": self.enums,
                "typedef": self.typedefs,
                "macro": self.macros
            }
            if type in type_map:
                type_map[type][name] = d
            else:
                self.warning("Unable to register type %s" % type)

        if name == debugsym and not quiet:
            print("New symbol: %s" % (d))
        return d

    def merge(self, idx):
        for id in idx.functions.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if id in self.macros:
                del self.macros[id]
            if id in self.functions:
                self.warning("function %s from %s redeclared in %s" % (
                    id, self.functions[id].header, idx.functions[id].header))
            else:
                self.functions[id] = idx.functions[id]
                self.identifiers[id] = idx.functions[id]
        for id in idx.variables.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if id in self.macros:
                del self.macros[id]
            if id in self.variables:
                self.warning("variable %s from %s redeclared in %s" % (
                    id, self.variables[id].header, idx.variables[id].header))
            else:
                self.variables[id] = idx.variables[id]
                self.identifiers[id] = idx.variables[id]
        for id in idx.structs.keys():
            if id in self.structs:
                self.warning("struct %s from %s redeclared in %s" % (
                    id, self.structs[id].header, idx.structs[id].header))
            else:
                self.structs[id] = idx.structs[id]
                self.identifiers[id] = idx.structs[id]
        for id in idx.unions.keys():
            if id in self.unions:
                print("union %s from %s redeclared in %s" % (
                    id, self.unions[id].header, idx.unions[id].header))
            else:
                self.unions[id] = idx.unions[id]
                self.identifiers[id] = idx.unions[id]
        for id in idx.typedefs.keys():
            if id in self.typedefs:
                self.warning("typedef %s from %s redeclared in %s" % (
                    id, self.typedefs[id].header, idx.typedefs[id].header))
            else:
                self.typedefs[id] = idx.typedefs[id]
                self.identifiers[id] = idx.typedefs[id]
        for id in idx.macros.keys():
            #
            # macro might be used to override functions or variables
            # definitions
            #
            if id in self.variables:
                continue
            if id in self.functions:
                continue
            if id in self.enums:
                continue
            if id in self.macros:
                self.warning("macro %s from %s redeclared in %s" % (
                    id, self.macros[id].header, idx.macros[id].header))
            else:
                self.macros[id] = idx.macros[id]
                self.identifiers[id] = idx.macros[id]
        for id in idx.enums.keys():
            if id in self.enums:
                self.warning("enum %s from %s redeclared in %s" % (
                    id, self.enums[id].header, idx.enums[id].header))
            else:
                self.enums[id] = idx.enums[id]
                self.identifiers[id] = idx.enums[id]

    def merge_public(self, idx):
        for id in idx.functions.keys():
            if id in self.functions:
                up = idx.functions[id]
                # check that function condition agrees with header
                if up.conditionals != self.functions[id].conditionals:
                    self.warning("Header condition differs from Function"
                                 " for %s:" % id)
                    self.warning("  H: %s" % self.functions[id].conditionals)
                    self.warning("  C: %s" % up.conditionals)
                self.functions[id].update(None, up.module, up.type, up.info,
                                          up.extra)
            # else:
            #     print("Function %s from %s is not declared in headers" % (
            #           id, idx.functions[id].module))
        # TODO: do the same for variables.

    def analyze_dict(self, type, dict):
        count = 0
        public = 0
        for name in dict.keys():
            id = dict[name]
            count = count + 1
            if id.static == 0:
                public = public + 1
        if count != public:
            print("  %d %s , %d public" % (count, type, public))
        elif count != 0:
            print("  %d public %s" % (count, type))

    def analyze(self):
        if not quiet:
            self.analyze_dict("functions", self.functions)
            self.analyze_dict("variables", self.variables)
            self.analyze_dict("structs", self.structs)
            self.analyze_dict("unions", self.unions)
            self.analyze_dict("typedefs", self.typedefs)
            self.analyze_dict("macros", self.macros)


class CLexer:
    """A lexer for the C language, tokenize the input by reading and
       analyzing it line by line"""
    def __init__(self, input):
        self.input = input
        self.tokens = []
        self.line = ""
        self.lineno = 0

    def getline(self):
        line = ''
        while line == '':
            line = self.input.readline()
            if not line:
                return None
            self.lineno += 1
            line = line.strip()
            if line == '':
                continue
            while line[-1] == '\\':
                line = line[:-1]
                n = self.input.readline().strip()
                self.lineno += 1
                if not n:
                    break
                line += n
        return line

    def getlineno(self):
        return self.lineno

    def push(self, token):
        self.tokens.insert(0, token)

    def debug(self):
        print("Last token: ", self.last)
        print("Token queue: ", self.tokens)
        print("Line %d end: " % self.lineno, self.line)
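
    # Tokens are returned as (kind, value) tuples where kind is one of
    # 'preproc', 'string', 'comment', 'name', 'sep' or 'op', e.g.
    # ('name', 'virDomainPtr') or ('sep', '(').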
    def token(self):
        while self.tokens == []:
            if self.line == "":
                line = self.getline()
            else:
                line = self.line
                self.line = ""
            if line is None:
                return None
            if line[0] == '#':
                self.tokens = [('preproc', word) for word in line.split()]

                # We might have whitespace between the '#' and preproc
                # macro name, so instead of having a single token element
                # of '#define' we might end up with '#' and 'define'. This
                # merges them back together
                if self.tokens[0][1] == "#":
                    self.tokens[0] = ('preproc', "#" + self.tokens[1][1])
                    del self.tokens[1]
                break
            nline = len(line)
            if line[0] == '"' or line[0] == "'":
                quote = line[0]
                i = 1
                while quote not in line[i:]:
                    i = len(line)
                    nextline = self.getline()
                    if nextline is None:
                        return None
                    line += nextline

                tok, self.line = line[1:].split(quote, 1)
                self.last = ('string', tok)
                return self.last

            if line.startswith("/*"):
                line = line[2:]
                found = 0
                tok = ""
                while found == 0:
                    i = 0
                    nline = len(line)
                    while i < nline:
                        if line[i] == '*' and i + 1 < nline and line[i + 1] == '/':
                            self.line = line[i + 2:]
                            line = line[:i - 1]
                            nline = i
                            found = 1
                            break
                        i = i + 1
                    if tok != "":
                        tok = tok + "\n"
                    tok = tok + line
                    if found == 0:
                        line = self.getline()
                        if line is None:
                            return None
                self.last = ('comment', tok)
                return self.last
            if line.startswith("//"):
                line = line[2:]
                self.last = ('comment', line)
                return self.last
            i = 0
            while i < nline:
                if line[i] == '/' and i + 1 < nline and line[i + 1] == '/':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '/' and i + 1 < nline and line[i + 1] == '*':
                    self.line = line[i:]
                    line = line[:i]
                    break
                if line[i] == '"' or line[i] == "'":
                    self.line = line[i:]
                    line = line[:i]
                    break
                i = i + 1
            nline = len(line)
            i = 0
            while i < nline:
                if line[i] == ' ' or line[i] == '\t':
                    i = i + 1
                    continue
                if line[i].isalnum():
                    s = i
                    while i < nline:
                        if line[i] not in " \t(){}:;,+-*/%&!|[]=><":
                            i = i + 1
                        else:
                            break
                    self.tokens.append(('name', line[s:i]))
                    continue
                if line[i] in "(){}:;,[]":
                    self.tokens.append(('sep', line[i]))
                    i = i + 1
                    continue
                if line[i] in "+-*><=/%&!|.":
                    if line[i] == '.' and i + 2 < nline and \
                       line[i + 1] == '.' and line[i + 2] == '.':
                        self.tokens.append(('name', '...'))
                        i = i + 3
                        continue

                    j = i + 1
                    if j < nline and line[j] in "+-*><=/%&!|":
                        self.tokens.append(('op', line[i:j + 1]))
                        i = j + 1
                    else:
                        self.tokens.append(('op', line[i]))
                        i = i + 1
                    continue
                s = i
                while i < nline:
                    if line[i] not in " \t(){}:;,+-*/%&!|[]=><":
                        i = i + 1
                    else:
                        break
                self.tokens.append(('name', line[s:i]))

        tok = self.tokens[0]
        self.tokens = self.tokens[1:]
        self.last = tok
        return tok


class CParser:
    """The C module parser"""
    def __init__(self, filename, idx=None):
        self.filename = filename
        if len(filename) > 2 and filename[-2:] == '.h':
            self.is_header = 1
        else:
            self.is_header = 0
        self.input = open(filename)
        self.lexer = CLexer(self.input)
        if idx is None:
            self.index = index()
        else:
            self.index = idx
        self.top_comment = ""
        self.last_comment = ""
        self.comment = None
        self.collect_ref = 0
        self.no_error = 0
        self.conditionals = []
        self.defines = []

    def collect_references(self):
        self.collect_ref = 1

    def stop_error(self):
        self.no_error = 1

    def start_error(self):
        self.no_error = 0

    def lineno(self):
        return self.lexer.getlineno()

    def index_add(self, name, module, static, type, info=None, extra=None):
        if self.is_header == 1:
            self.index.add(name, module, module, static, type, self.lineno(),
                           info, extra, self.conditionals)
        else:
            self.index.add(name, None, module, static, type, self.lineno(),
                           info, extra, self.conditionals)

    def index_add_ref(self, name, module, static, type, info=None,
                      extra=None):
        if self.is_header == 1:
            self.index.add_ref(name, module, module, static, type,
                               self.lineno(), info, extra, self.conditionals)
        else:
            self.index.add_ref(name, None, module, static, type, self.lineno(),
                               info, extra, self.conditionals)

    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        if self.no_error:
            return
        print(msg)

    def error(self, msg, token=-1):
        if self.no_error:
            return

        print("Parse Error: " + msg)
        if token != -1:
            print("Got token ", token)
        self.lexer.debug()
        sys.exit(1)

    def debug(self, msg, token=-1):
        print("Debug: " + msg)
        if token != -1:
            print("Got token ", token)
        self.lexer.debug()
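
    # The comment at the top of each module is expected to carry "key: value"
    # items (for instance "* summary: ..." and "* description: ..."); they are
    # collected into self.index.info by parseTopComment() below.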
    def parseTopComment(self, comment):
        res = {}
        lines = comment.split("\n")
        item = None
        for line in lines:
            line = line.lstrip().lstrip('*').lstrip()

            m = re.match(r'([_.a-zA-Z0-9]+):(.*)', line)
            if m:
                item = m.group(1)
                line = m.group(2).lstrip()
            if item:
                if item in res:
                    res[item] = res[item] + " " + line
                else:
                    res[item] = line
        self.index.info = res

    def strip_lead_star(self, line):
        if line.lstrip().startswith('*'):
            line = line.replace('*', '', 1)
        return line

    def cleanupComment(self):
        if not isinstance(self.comment, str):
            return
        # remove the leading * on multi-line comments
        lines = self.comment.splitlines(True)
        com = ""
        for line in lines:
            com = com + self.strip_lead_star(line)
        self.comment = com.strip()

    def parseComment(self, token):
        com = token[1]
        if self.top_comment == "":
            self.top_comment = com
        if self.comment is None or com[0] == '*':
            self.comment = com
        else:
            self.comment = self.comment + com
        token = self.lexer.token()

        if self.comment.find("DOC_DISABLE") != -1:
            self.stop_error()

        if self.comment.find("DOC_ENABLE") != -1:
            self.start_error()

        return token

    #
    # Parse a comment block associated with a typedef
    #
    def parseTypeComment(self, name, quiet=False):
        if name[0:2] == '__':
            quiet = True

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for type %s" % name)
            return None
        if not self.comment.startswith('*'):
            if not quiet:
                self.warning("Missing * in type comment for %s" % name)
            return None

        lines = self.comment.split('\n')
        # Remove lines that contain only single asterisk
        lines[:] = [line for line in lines if line.strip() != '*']

        if lines[0] != "* %s:" % name:
            if not quiet:
                self.warning("Misformatted type comment for %s" % name)
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return None
        del lines[0]

        # Concatenate all remaining lines by striping leading asterisks
        desc = " ".join([line.lstrip("*").strip() for line in lines]).strip()

        if not (quiet or desc):
            self.warning("Type comment for %s lack description of the macro"
                         % name)

        return desc

    #
    # Parse a comment block associated with a macro
    #
    def parseMacroComment(self, name, quiet=0):
        global ignored_macros

        if name[0:2] == '__':
            quiet = 1
        if name in ignored_macros:
            quiet = 1

        args = []
        desc = ""

        if self.comment is None:
            if not quiet:
                self.warning("Missing comment for macro %s" % name)
            return args, desc
        if self.comment[0] != '*':
            if not quiet:
                self.warning("Missing * in macro comment for %s" % name)
            return args, desc
        lines = self.comment.split('\n')
        if lines[0] == '*':
            del lines[0]
        if lines[0] != "* %s:" % name:
            if not quiet:
                self.warning("Misformatted macro comment for %s" % name)
                self.warning("  Expecting '* %s:' got '%s'" % (name, lines[0]))
            return args, desc
        del lines[0]
        while lines[0] == '*':
            del lines[0]
        while len(lines) > 0 and lines[0][0:3] == '* @':
            prefix = lines[0][3:]
            try:
                arg, desc = prefix.split(':', 1)
                desc = desc.strip()
                arg = arg.strip()
            except Exception:
                if not quiet:
                    self.warning("Misformatted macro comment for %s" % name)
                    self.warning("  problem with '%s'" % lines[0])
                del lines[0]
                continue
            del lines[0]
            line = lines[0].strip()
            while len(line) > 2 and line[0:3] != '* @':
                while line[0] == '*':
                    line = line[1:]
                desc = desc + ' ' + line.strip()
                del lines[0]
                if len(lines) == 0:
                    break
                line = lines[0]
            args.append((arg, desc))
            while len(lines) > 0 and lines[0] == '*':
                del lines[0]
        desc = ""
        while len(lines) > 0:
            line = lines[0]
            while len(line) > 0 and line[0] == '*':
                line = line[1:]
            line = line.strip()
            desc = desc + " " + line
            del lines[0]

        desc = desc.strip()

        if quiet == 0:
            if desc == "":
                self.warning("Macro comment for %s lack description of the macro" % name)

        return args, desc

    #
    # Parse a comment block and merge the information found in the
    # parameter descriptions, finally returning a block as complete
    # as possible
    #
2008-11-25 18:48:11 +03:00
global ignored_functions
2005-12-01 20:34:21 +03:00
if name == ' main ' :
2011-02-16 18:57:50 +03:00
quiet = 1
2005-12-01 20:34:21 +03:00
if name [ 0 : 2 ] == ' __ ' :
2011-02-16 18:57:50 +03:00
quiet = 1
2018-03-15 12:39:49 +03:00
if name in ignored_functions :
2008-11-25 18:48:11 +03:00
quiet = 1
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:07 +03:00
ret , args = description
2011-02-16 18:57:50 +03:00
desc = " "
retdesc = " "
2005-12-01 20:34:21 +03:00
2013-08-22 13:16:03 +04:00
if self . comment is None :
2011-02-16 18:57:50 +03:00
if not quiet :
2018-03-20 09:49:07 +03:00
self . warning ( " Missing comment for function %s " % name )
return ( ret [ 0 ] , retdesc ) , args , desc
2005-12-01 20:34:21 +03:00
if self . comment [ 0 ] != ' * ' :
2011-02-16 18:57:50 +03:00
if not quiet :
2018-03-20 09:49:07 +03:00
self . warning ( " Missing * in function comment for %s " % name )
return ( ret [ 0 ] , retdesc ) , args , desc
2018-03-15 12:42:44 +03:00
lines = self . comment . split ( ' \n ' )
2011-02-16 18:57:50 +03:00
if lines [ 0 ] == ' * ' :
del lines [ 0 ]
2018-03-20 09:49:07 +03:00
if lines [ 0 ] != " * %s : " % name :
2011-02-16 18:57:50 +03:00
if not quiet :
2018-03-20 09:49:07 +03:00
self . warning ( " Misformatted function comment for %s " % name )
2011-02-16 18:57:50 +03:00
self . warning ( " Expecting ' * %s : ' got ' %s ' " % ( name , lines [ 0 ] ) )
2018-03-20 09:49:07 +03:00
return ( ret [ 0 ] , retdesc ) , args , desc
2011-02-16 18:57:50 +03:00
del lines [ 0 ]
while lines [ 0 ] == ' * ' :
del lines [ 0 ]
nbargs = len ( args )
while len ( lines ) > 0 and lines [ 0 ] [ 0 : 3 ] == ' * @ ' :
2019-09-24 15:40:44 +03:00
prefix = lines [ 0 ] [ 3 : ]
2011-02-16 18:57:50 +03:00
try :
2019-09-24 15:40:44 +03:00
arg , desc = prefix . split ( ' : ' , 1 )
2018-03-15 12:42:44 +03:00
desc = desc . strip ( )
arg = arg . strip ( )
2019-09-24 15:42:51 +03:00
except Exception :
2011-02-16 18:57:50 +03:00
if not quiet :
2018-03-20 09:49:07 +03:00
self . warning ( " Misformatted function comment for %s " % name )
self . warning ( " problem with ' %s ' " % lines [ 0 ] )
2011-02-16 18:57:50 +03:00
del lines [ 0 ]
continue
del lines [ 0 ]
2019-09-24 15:40:44 +03:00
line = lines [ 0 ] . strip ( )
while len ( line ) > 2 and line [ 0 : 3 ] != ' * @ ' :
while line [ 0 ] == ' * ' :
line = line [ 1 : ]
desc = desc + ' ' + line . strip ( )
2011-02-16 18:57:50 +03:00
del lines [ 0 ]
if len ( lines ) == 0 :
break
2019-09-24 15:40:44 +03:00
line = lines [ 0 ]
2011-02-16 18:57:50 +03:00
i = 0
while i < nbargs :
if args [ i ] [ 1 ] == arg :
args [ i ] = ( args [ i ] [ 0 ] , arg , desc )
2013-02-07 11:22:01 +04:00
break
2011-02-16 18:57:50 +03:00
i = i + 1
if i > = nbargs :
if not quiet :
self . warning ( " Unable to find arg %s from function comment for %s " % (
arg , name ) )
while len ( lines ) > 0 and lines [ 0 ] == ' * ' :
del lines [ 0 ]
desc = None
while len ( lines ) > 0 :
2019-09-24 15:40:44 +03:00
line = lines [ 0 ]
2011-02-16 18:57:50 +03:00
i = 0
# Remove all leading '*', followed by at most one ' ' character
2013-07-30 12:21:11 +04:00
# since we need to preserve correct indentation of code examples
2019-09-24 15:40:44 +03:00
while i < len ( line ) and line [ i ] == ' * ' :
2011-02-16 18:57:50 +03:00
i = i + 1
if i > 0 :
2019-09-24 15:40:44 +03:00
if i < len ( line ) and line [ i ] == ' ' :
2011-02-16 18:57:50 +03:00
i = i + 1
2019-09-24 15:40:44 +03:00
line = line [ i : ]
if len ( line ) > = 6 and line [ 0 : 7 ] == " Returns " :
2011-02-16 18:57:50 +03:00
try :
2019-09-24 15:40:44 +03:00
line = line . split ( ' ' , 1 ) [ 1 ]
2019-09-24 15:42:51 +03:00
except Exception :
2019-09-24 15:40:44 +03:00
line = " "
retdesc = line . strip ( )
2011-02-16 18:57:50 +03:00
del lines [ 0 ]
while len ( lines ) > 0 :
2019-09-24 15:40:44 +03:00
line = lines [ 0 ]
while len ( line ) > 0 and line [ 0 ] == ' * ' :
line = line [ 1 : ]
line = line . strip ( )
retdesc = retdesc + " " + line
2011-02-16 18:57:50 +03:00
del lines [ 0 ]
else :
if desc is not None :
2019-09-24 15:40:44 +03:00
desc = desc + " \n " + line
2011-02-16 18:57:50 +03:00
else :
2019-09-24 15:40:44 +03:00
desc = line
2011-02-16 18:57:50 +03:00
del lines [ 0 ]
if desc is None :
desc = " "
2018-03-15 12:42:44 +03:00
retdesc = retdesc . strip ( )
desc = desc . strip ( )
2011-02-16 18:57:50 +03:00
if quiet == 0 :
2018-03-20 09:49:00 +03:00
#
# report missing comments
#
2011-02-16 18:57:50 +03:00
i = 0
while i < nbargs :
2013-08-22 13:16:03 +04:00
if args [ i ] [ 2 ] is None and args [ i ] [ 0 ] != " void " and args [ i ] [ 1 ] is not None :
2011-02-16 18:57:50 +03:00
self . warning ( " Function comment for %s lacks description of arg %s " % ( name , args [ i ] [ 1 ] ) )
i = i + 1
if retdesc == " " and ret [ 0 ] != " void " :
2018-03-20 09:49:07 +03:00
self . warning ( " Function comment for %s lacks description of return value " % name )
2011-02-16 18:57:50 +03:00
if desc == " " :
2018-03-20 09:49:07 +03:00
self . warning ( " Function comment for %s lacks description of the function " % name )
2011-02-16 18:57:50 +03:00
2018-03-20 09:49:07 +03:00
return ( ret [ 0 ] , retdesc ) , args , desc
2005-12-01 20:34:21 +03:00
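
    # Preprocessor handling below: #include lines are indexed as "include"
    # entries, plain #define lines (e.g. a string or version constant) as
    # "macro" entries, and #ifdef/#ifndef/#if/#else/#endif conditionals are
    # tracked so that the symbols they guard can be annotated.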
    def parsePreproc(self, token):
        if debug:
            print("=> preproc ", token, self.lexer.tokens)
        name = token[1]
        if name == "#include":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                self.index_add(token[1], self.filename, not self.is_header,
                               "include")
                return self.lexer.token()
            return token
        if name == "#define":
            token = self.lexer.token()
            if token is None:
                return None
            if token[0] == 'preproc':
                # TODO macros with arguments
                name = token[1]
                lst = []
                token = self.lexer.token()
                while token is not None and token[0] == 'preproc' and \
                        token[1][0] != '#':
                    lst.append(token[1])
                    token = self.lexer.token()
                try:
                    name = name.split('(')[0]
                except Exception:
                    pass

                # skip hidden macros
                if name in hidden_macros:
                    return token
                if name[-2:] == "_H" or name[-8:] == "_H_ALLOW":
                    return token

                strValue = None
                if len(lst) == 1 and lst[0][0] == '"' and lst[0][-1] == '"':
                    strValue = lst[0][1:-1]
                (args, desc) = self.parseMacroComment(name, not self.is_header)
                self.index_add(name, self.filename, not self.is_header,
                               "macro", (args, desc, strValue))
                return token

        #
        # Processing of conditionals modified by Bill 1/1/05
        #
        # We process conditionals (i.e. tokens from #ifdef, #ifndef,
        # #if, #else and #endif) for headers and mainline code,
        # store the ones from the header in libxml2-api.xml, and later
        # (in the routine merge_public) verify that the two (header and
        # mainline code) agree.
        #
        # There is a small problem with processing the headers. Some of
        # the variables are not concerned with enabling / disabling of
        # library functions (e.g. '__XML_PARSER_H__'), and we don't want
        # them to be included in libxml2-api.xml, or involved in
        # the check between the header and the mainline code. To
        # accomplish this, we ignore any conditional which doesn't include
        # the string 'ENABLED'
        #
        if name == "#ifdef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append("defined(%s)" % apstr)
            except Exception:
                pass
        elif name == "#ifndef":
            apstr = self.lexer.tokens[0][1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append("!defined(%s)" % apstr)
            except Exception:
                pass
        elif name == "#if":
            apstr = ""
            for tok in self.lexer.tokens:
                if apstr != "":
                    apstr = apstr + " "
                apstr = apstr + tok[1]
            try:
                self.defines.append(apstr)
                if apstr.find('ENABLED') != -1:
                    self.conditionals.append(apstr)
            except Exception:
                pass
        elif name == "#else":
            if self.conditionals != [] and \
                    self.defines[-1].find('ENABLED') != -1:
                self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
        elif name == "#endif":
            if self.conditionals != [] and \
                    self.defines[-1].find('ENABLED') != -1:
                self.conditionals = self.conditionals[:-1]
            self.defines = self.defines[:-1]
        token = self.lexer.token()
        while token is not None and token[0] == 'preproc' and \
                token[1][0] != '#':
            token = self.lexer.token()
        return token
#
# token acquisition on top of the lexer, it handle internally
# preprocessor and comments since they are logically not part of
# the program structure.
#
2005-12-06 19:50:31 +03:00
def push ( self , tok ) :
self . lexer . push ( tok )
2005-12-01 20:34:21 +03:00
def token ( self ) :
global ignored_words
token = self . lexer . token ( )
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == ' comment ' :
token = self . parseComment ( token )
continue
elif token [ 0 ] == ' preproc ' :
token = self . parsePreproc ( token )
continue
elif token [ 0 ] == " name " and token [ 1 ] == " __const " :
token = ( " name " , " const " )
return token
elif token [ 0 ] == " name " and token [ 1 ] == " __attribute " :
token = self . lexer . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and token [ 1 ] != " ; " :
2011-02-16 18:57:50 +03:00
token = self . lexer . token ( )
return token
2018-03-15 12:39:49 +03:00
elif token [ 0 ] == " name " and token [ 1 ] in ignored_words :
2011-02-16 18:57:50 +03:00
( n , info ) = ignored_words [ token [ 1 ] ]
i = 0
while i < n :
token = self . lexer . token ( )
i = i + 1
token = self . lexer . token ( )
continue
else :
if debug :
2018-03-15 12:30:03 +03:00
print ( " => " , token )
2011-02-16 18:57:50 +03:00
return token
return None
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:00 +03:00
#
# Parse a typedef; it records the type and its name.
#
2005-12-01 20:34:21 +03:00
def parseTypedef ( self , token ) :
2013-08-22 13:16:03 +04:00
if token is None :
2011-02-16 18:57:50 +03:00
return None
token = self . parseType ( token )
2013-08-22 13:16:03 +04:00
if token is None :
2011-02-16 18:57:50 +03:00
self . error ( " parsing typedef " )
return None
base_type = self . type
type = base_type
2018-03-20 09:49:00 +03:00
# self.debug("end typedef type", token)
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " name " :
name = token [ 1 ]
signature = self . signature
2013-08-22 13:16:03 +04:00
if signature is not None :
2018-03-15 12:42:44 +03:00
type = type . split ( ' ( ' ) [ 0 ]
2011-02-16 18:57:50 +03:00
d = self . mergeFunctionComment ( name ,
( ( type , None ) , signature ) , 1 )
self . index_add ( name , self . filename , not self . is_header ,
" functype " , d )
else :
if base_type == " struct " :
self . index_add ( name , self . filename , not self . is_header ,
" struct " , type )
base_type = " struct " + name
else :
# TODO report missing or misformatted comments
info = self . parseTypeComment ( name , 1 )
self . index_add ( name , self . filename , not self . is_header ,
" typedef " , type , info )
token = self . token ( )
else :
self . error ( " parsing typedef: expecting a name " )
return token
2018-03-20 09:49:00 +03:00
# self.debug("end typedef", token)
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == ' sep ' and token [ 1 ] == ' , ' :
2011-02-16 18:57:50 +03:00
type = base_type
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and token [ 0 ] == " op " :
2011-02-16 18:57:50 +03:00
type = type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == ' sep ' and token [ 1 ] == ' ; ' :
2013-02-07 11:22:01 +04:00
break
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == ' name ' :
2011-02-16 18:57:50 +03:00
type = base_type
2013-02-07 11:22:01 +04:00
continue
2011-02-16 18:57:50 +03:00
else :
self . error ( " parsing typedef: expecting ' ; ' " , token )
return token
token = self . token ( )
return token
2008-02-05 22:27:37 +03:00
2018-03-20 09:49:00 +03:00
#
# Parse a C code block (used for function bodies); it parses until the
# balancing '}', inclusive.
#
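# While scanning a body with self.collect_ref set, it also records
# cross-references: a "vir..." name followed by '(' is indexed as a
# function reference, a "vir..." name used as a type (followed by another
# name and ';', ',' or '=') as a type reference, and names starting with
# "XEN_" or "LIBXEN_" as typedef references.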
2005-12-01 20:34:21 +03:00
def parseBlock ( self , token ) :
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " sep " and token [ 1 ] == " { " :
token = self . token ( )
token = self . parseBlock ( token )
elif token [ 0 ] == " sep " and token [ 1 ] == " } " :
self . comment = None
token = self . token ( )
return token
else :
if self . collect_ref == 1 :
oldtok = token
token = self . token ( )
if oldtok [ 0 ] == " name " and oldtok [ 1 ] [ 0 : 3 ] == " vir " :
if token [ 0 ] == " sep " and token [ 1 ] == " ( " :
self . index_add_ref ( oldtok [ 1 ] , self . filename ,
0 , " function " )
token = self . token ( )
elif token [ 0 ] == " name " :
token = self . token ( )
if token [ 0 ] == " sep " and ( token [ 1 ] == " ; " or
token [ 1 ] == " , " or token [ 1 ] == " = " ) :
self . index_add_ref ( oldtok [ 1 ] , self . filename ,
0 , " type " )
elif oldtok [ 0 ] == " name " and oldtok [ 1 ] [ 0 : 4 ] == " XEN_ " :
self . index_add_ref ( oldtok [ 1 ] , self . filename ,
0 , " typedef " )
elif oldtok [ 0 ] == " name " and oldtok [ 1 ] [ 0 : 7 ] == " LIBXEN_ " :
self . index_add_ref ( oldtok [ 1 ] , self . filename ,
0 , " typedef " )
else :
token = self . token ( )
return token
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:00 +03:00
#
# Parse a C struct definition till the balancing }
#
2005-12-01 20:34:21 +03:00
def parseStruct ( self , token ) :
fields = [ ]
2018-03-20 09:49:00 +03:00
# self.debug("start parseStruct", token)
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " sep " and token [ 1 ] == " { " :
token = self . token ( )
token = self . parseTypeBlock ( token )
elif token [ 0 ] == " sep " and token [ 1 ] == " } " :
self . struct_fields = fields
2018-03-20 09:49:00 +03:00
# self.debug("end parseStruct", token)
# print(fields)
2011-02-16 18:57:50 +03:00
token = self . token ( )
return token
else :
base_type = self . type
2018-03-20 09:49:00 +03:00
# self.debug("before parseType", token)
2011-02-16 18:57:50 +03:00
token = self . parseType ( token )
2018-03-20 09:49:00 +03:00
# self.debug("after parseType", token)
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-02-16 18:57:50 +03:00
fname = token [ 1 ]
token = self . token ( )
if token [ 0 ] == " sep " and token [ 1 ] == " ; " :
self . comment = None
token = self . token ( )
2011-06-20 07:25:34 +04:00
self . cleanupComment ( )
if self . type == " union " :
fields . append ( ( self . type , fname , self . comment ,
self . union_fields ) )
self . union_fields = [ ]
else :
fields . append ( ( self . type , fname , self . comment ) )
2011-02-16 18:57:50 +03:00
self . comment = None
else :
self . error ( " parseStruct: expecting ; " , token )
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " sep " and token [ 1 ] == " { " :
2011-02-16 18:57:50 +03:00
token = self . token ( )
token = self . parseTypeBlock ( token )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-02-16 18:57:50 +03:00
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " ; " :
2011-02-16 18:57:50 +03:00
token = self . token ( )
else :
self . error ( " parseStruct: expecting ; " , token )
else :
self . error ( " parseStruct: name " , token )
token = self . token ( )
2013-02-07 11:22:01 +04:00
self . type = base_type
2005-12-01 20:34:21 +03:00
self . struct_fields = fields
2018-03-20 09:49:00 +03:00
# self.debug("end parseStruct", token)
# print(fields)
2011-02-16 18:57:50 +03:00
return token
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:00 +03:00
#
# Parse a C union definition till the balancing }
#
2011-06-20 07:25:34 +04:00
def parseUnion ( self , token ) :
fields = [ ]
# self.debug("start parseUnion", token)
2013-08-22 13:16:03 +04:00
while token is not None :
2011-06-20 07:25:34 +04:00
if token [ 0 ] == " sep " and token [ 1 ] == " { " :
token = self . token ( )
token = self . parseTypeBlock ( token )
elif token [ 0 ] == " sep " and token [ 1 ] == " } " :
self . union_fields = fields
# self.debug("end parseUnion", token)
2018-03-15 12:30:03 +03:00
# print(fields)
2011-06-20 07:25:34 +04:00
token = self . token ( )
return token
else :
base_type = self . type
# self.debug("before parseType", token)
token = self . parseType ( token )
# self.debug("after parseType", token)
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-06-20 07:25:34 +04:00
fname = token [ 1 ]
token = self . token ( )
if token [ 0 ] == " sep " and token [ 1 ] == " ; " :
self . comment = None
token = self . token ( )
self . cleanupComment ( )
fields . append ( ( self . type , fname , self . comment ) )
self . comment = None
else :
self . error ( " parseUnion: expecting ; " , token )
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " sep " and token [ 1 ] == " { " :
2011-06-20 07:25:34 +04:00
token = self . token ( )
token = self . parseTypeBlock ( token )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-06-20 07:25:34 +04:00
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " ; " :
2011-06-20 07:25:34 +04:00
token = self . token ( )
else :
self . error ( " parseUnion: expecting ; " , token )
else :
self . error ( " parseUnion: name " , token )
token = self . token ( )
2013-02-07 11:22:01 +04:00
self . type = base_type
2011-06-20 07:25:34 +04:00
self . union_fields = fields
# self.debug("end parseUnion", token)
2018-03-15 12:30:03 +03:00
# print(fields)
2011-06-20 07:25:34 +04:00
return token
2018-03-20 09:49:00 +03:00
#
# Parse a C enum block; parse until the balancing '}'
#
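# Each member ends up in self.enums as a (name, value, comment) tuple;
# when no explicit '=' is given, the value of the previous member plus
# one is used.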
2005-12-01 20:34:21 +03:00
def parseEnumBlock ( self , token ) :
self . enums = [ ]
2011-02-16 18:57:50 +03:00
name = None
comment = " "
2014-06-26 00:54:36 +04:00
value = " -1 "
2017-07-22 09:05:12 +03:00
commentsBeforeVal = self . comment is not None
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " sep " and token [ 1 ] == " { " :
token = self . token ( )
token = self . parseTypeBlock ( token )
elif token [ 0 ] == " sep " and token [ 1 ] == " } " :
2013-08-22 13:16:03 +04:00
if name is not None :
2011-06-20 07:25:34 +04:00
self . cleanupComment ( )
2013-08-22 13:16:03 +04:00
if self . comment is not None :
2011-02-16 18:57:50 +03:00
comment = self . comment
self . comment = None
self . enums . append ( ( name , value , comment ) )
token = self . token ( )
return token
elif token [ 0 ] == " name " :
2015-06-05 12:38:51 +03:00
self . cleanupComment ( )
if name is not None :
if self . comment is not None :
2018-03-15 12:42:44 +03:00
comment = self . comment . strip ( )
2015-06-05 12:38:51 +03:00
self . comment = None
self . enums . append ( ( name , value , comment ) )
name = token [ 1 ]
comment = " "
token = self . token ( )
if token [ 0 ] == " op " and token [ 1 ] [ 0 ] == " = " :
value = " "
if len ( token [ 1 ] ) > 1 :
value = token [ 1 ] [ 1 : ]
2011-02-16 18:57:50 +03:00
token = self . token ( )
2015-06-05 12:38:51 +03:00
while token [ 0 ] != " sep " or ( token [ 1 ] != ' , ' and
token [ 1 ] != ' } ' ) :
2016-05-28 14:21:43 +03:00
# We might be dealing with '1U << 12' here
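# e.g. the token '1U' is rewritten to '1', so a member defined as
# '1U << 31' yields the string "1<<31", which can later be eval()ed as an
# integer instead of being mistaken for an alias of another enum member.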
2019-09-24 15:29:27 +03:00
value = value + re . sub ( r " ^( \ d+)U$ " , " \\ 1 " , token [ 1 ] )
2011-02-16 18:57:50 +03:00
token = self . token ( )
2015-06-05 12:38:51 +03:00
else :
try :
value = " %d " % ( int ( value ) + 1 )
2019-09-24 15:42:51 +03:00
except Exception :
2018-03-20 09:49:07 +03:00
self . warning ( " Failed to compute value of enum %s " % name )
2018-03-20 09:48:47 +03:00
value = " "
2015-06-05 12:38:51 +03:00
if token [ 0 ] == " sep " and token [ 1 ] == " , " :
2017-07-22 09:05:12 +03:00
if commentsBeforeVal :
self . cleanupComment ( )
self . enums . append ( ( name , value , self . comment ) )
name = comment = self . comment = None
2015-06-05 12:38:51 +03:00
token = self . token ( )
2011-02-16 18:57:50 +03:00
else :
token = self . token ( )
return token
2005-12-01 20:34:21 +03:00
2012-05-15 14:59:00 +04:00
def parseVirEnumDecl ( self , token ) :
if token [ 0 ] != " name " :
self . error ( " parsing VIR_ENUM_DECL: expecting name " , token )
token = self . token ( )
if token [ 0 ] != " sep " :
self . error ( " parsing VIR_ENUM_DECL: expecting ' ) ' " , token )
if token [ 1 ] != ' ) ' :
self . error ( " parsing VIR_ENUM_DECL: expecting ' ) ' " , token )
token = self . token ( )
if token [ 0 ] == " sep " and token [ 1 ] == ' ; ' :
token = self . token ( )
return token
def parseVirEnumImpl ( self , token ) :
# First the type name
if token [ 0 ] != " name " :
self . error ( " parsing VIR_ENUM_IMPL: expecting name " , token )
token = self . token ( )
if token [ 0 ] != " sep " :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' , ' " , token )
if token [ 1 ] != ' , ' :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' , ' " , token )
token = self . token ( )
# Now the sentinel name
if token [ 0 ] != " name " :
self . error ( " parsing VIR_ENUM_IMPL: expecting name " , token )
token = self . token ( )
if token [ 0 ] != " sep " :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' , ' " , token )
if token [ 1 ] != ' , ' :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' , ' " , token )
token = self . token ( )
# Now a list of strings (optional comments)
while token is not None :
isGettext = False
# First a string, optionally with N_(...)
if token [ 0 ] == ' name ' :
if token [ 1 ] != ' N_ ' :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' N_ ' " , token )
token = self . token ( )
if token [ 0 ] != " sep " or token [ 1 ] != ' ( ' :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' ( ' " , token )
token = self . token ( )
isGettext = True
if token [ 0 ] != " string " :
self . error ( " parsing VIR_ENUM_IMPL: expecting a string " , token )
token = self . token ( )
elif token [ 0 ] == " string " :
token = self . token ( )
else :
self . error ( " parsing VIR_ENUM_IMPL: expecting a string " , token )
# Then a separator
if token [ 0 ] == " sep " :
if isGettext and token [ 1 ] == ' ) ' :
token = self . token ( )
if token [ 1 ] == ' , ' :
token = self . token ( )
if token [ 1 ] == ' ) ' :
token = self . token ( )
break
# Then an optional comment
if token [ 0 ] == " comment " :
token = self . token ( )
if token [ 0 ] == " sep " and token [ 1 ] == ' ; ' :
token = self . token ( )
return token
2014-02-28 16:16:17 +04:00
def parseVirLogInit ( self , token ) :
if token [ 0 ] != " string " :
self . error ( " parsing VIR_LOG_INIT: expecting string " , token )
token = self . token ( )
if token [ 0 ] != " sep " :
self . error ( " parsing VIR_LOG_INIT: expecting ' ) ' " , token )
if token [ 1 ] != ' ) ' :
self . error ( " parsing VIR_LOG_INIT: expecting ' ) ' " , token )
token = self . token ( )
if token [ 0 ] == " sep " and token [ 1 ] == ' ; ' :
token = self . token ( )
return token
2018-03-20 09:49:00 +03:00
#
# Parse a C definition block (used for structs or unions); it parses
# until the balancing '}'.
#
2005-12-01 20:34:21 +03:00
def parseTypeBlock ( self , token ) :
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " sep " and token [ 1 ] == " { " :
token = self . token ( )
token = self . parseTypeBlock ( token )
elif token [ 0 ] == " sep " and token [ 1 ] == " } " :
token = self . token ( )
return token
else :
token = self . token ( )
return token
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:00 +03:00
#
# Parse a type: the fact that the type name can occur either after or
# within the definition makes it a little harder. If it is inside, the
# name token is pushed back before returning.
#
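# For instance (hypothetical declaration), parsing "unsigned long long
# bandwidth" accumulates the type "unsigned long long" in self.type and
# returns with the name token ("name", "bandwidth") as the current token.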
2005-12-01 20:34:21 +03:00
def parseType ( self , token ) :
self . type = " "
2011-02-16 18:57:50 +03:00
self . struct_fields = [ ]
2011-06-20 07:25:34 +04:00
self . union_fields = [ ]
2005-12-01 20:34:21 +03:00
self . signature = None
2013-08-22 13:16:03 +04:00
if token is None :
2011-02-16 18:57:50 +03:00
return token
2018-03-20 09:48:53 +03:00
while ( token [ 0 ] == " name " and
token [ 1 ] in [ " const " , " unsigned " , " signed " ] ) :
2011-02-16 18:57:50 +03:00
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
token = self . token ( )
2005-12-01 20:34:21 +03:00
2005-12-06 19:50:31 +03:00
if token [ 0 ] == " name " and token [ 1 ] == " long " :
2011-02-16 18:57:50 +03:00
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
# some read ahead for long long
oldtmp = token
token = self . token ( )
if token [ 0 ] == " name " and token [ 1 ] == " long " :
self . type = self . type + " " + token [ 1 ]
else :
self . push ( token )
token = oldtmp
2011-06-20 07:25:34 +04:00
oldtmp = token
token = self . token ( )
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " name " and token [ 1 ] == " int " :
2011-06-20 07:25:34 +04:00
self . type = self . type + " " + token [ 1 ]
else :
self . push ( token )
token = oldtmp
2005-12-06 19:50:31 +03:00
elif token [ 0 ] == " name " and token [ 1 ] == " short " :
2011-02-16 18:57:50 +03:00
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
2008-02-05 22:27:37 +03:00
2005-12-01 20:34:21 +03:00
elif token [ 0 ] == " name " and token [ 1 ] == " struct " :
2011-02-16 18:57:50 +03:00
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
token = self . token ( )
nametok = None
if token [ 0 ] == " name " :
nametok = token
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " { " :
2011-02-16 18:57:50 +03:00
token = self . token ( )
token = self . parseStruct ( token )
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " op " and token [ 1 ] == " * " :
2011-02-16 18:57:50 +03:00
self . type = self . type + " " + nametok [ 1 ] + " * "
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and token [ 0 ] == " op " and token [ 1 ] == " * " :
2011-02-16 18:57:50 +03:00
self . type = self . type + " * "
token = self . token ( )
if token [ 0 ] == " name " :
nametok = token
token = self . token ( )
else :
self . error ( " struct : expecting name " , token )
return token
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " name " and nametok is not None :
2011-02-16 18:57:50 +03:00
self . type = self . type + " " + nametok [ 1 ]
return token
2013-08-22 13:16:03 +04:00
if nametok is not None :
2011-02-16 18:57:50 +03:00
self . lexer . push ( token )
token = nametok
return token
2005-12-01 20:34:21 +03:00
2011-06-20 07:25:34 +04:00
elif token [ 0 ] == " name " and token [ 1 ] == " union " :
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
token = self . token ( )
nametok = None
if token [ 0 ] == " name " :
nametok = token
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " { " :
2011-06-20 07:25:34 +04:00
token = self . token ( )
token = self . parseUnion ( token )
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " name " and nametok is not None :
2011-06-20 07:25:34 +04:00
self . type = self . type + " " + nametok [ 1 ]
return token
2013-08-22 13:16:03 +04:00
if nametok is not None :
2011-06-20 07:25:34 +04:00
self . lexer . push ( token )
token = nametok
return token
2005-12-01 20:34:21 +03:00
elif token [ 0 ] == " name " and token [ 1 ] == " enum " :
2011-02-16 18:57:50 +03:00
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
self . enums = [ ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " { " :
2017-07-22 09:05:12 +03:00
# drop comments before the enum block
self . comment = None
2011-02-16 18:57:50 +03:00
token = self . token ( )
token = self . parseEnumBlock ( token )
else :
self . error ( " parsing enum: expecting ' { ' " , token )
enum_type = None
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] != " name " :
2011-02-16 18:57:50 +03:00
self . lexer . push ( token )
token = ( " name " , " enum " )
else :
enum_type = token [ 1 ]
for enum in self . enums :
self . index_add ( enum [ 0 ] , self . filename ,
not self . is_header , " enum " ,
( enum [ 1 ] , enum [ 2 ] , enum_type ) )
return token
2012-05-15 14:59:00 +04:00
elif token [ 0 ] == " name " and token [ 1 ] == " VIR_ENUM_DECL " :
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " ( " :
2012-05-15 14:59:00 +04:00
token = self . token ( )
token = self . parseVirEnumDecl ( token )
else :
self . error ( " parsing VIR_ENUM_DECL: expecting ' ( ' " , token )
2013-08-22 13:16:03 +04:00
if token is not None :
2012-05-15 14:59:00 +04:00
self . lexer . push ( token )
token = ( " name " , " virenumdecl " )
return token
elif token [ 0 ] == " name " and token [ 1 ] == " VIR_ENUM_IMPL " :
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " ( " :
2012-05-15 14:59:00 +04:00
token = self . token ( )
token = self . parseVirEnumImpl ( token )
else :
self . error ( " parsing VIR_ENUM_IMPL: expecting ' ( ' " , token )
2013-08-22 13:16:03 +04:00
if token is not None :
2012-05-15 14:59:00 +04:00
self . lexer . push ( token )
token = ( " name " , " virenumimpl " )
return token
2011-02-16 18:57:50 +03:00
2014-02-28 16:16:17 +04:00
elif token [ 0 ] == " name " and token [ 1 ] == " VIR_LOG_INIT " :
token = self . token ( )
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == " ( " :
token = self . token ( )
token = self . parseVirLogInit ( token )
else :
self . error ( " parsing VIR_LOG_INIT: expecting ' ( ' " , token )
if token is not None :
self . lexer . push ( token )
token = ( " name " , " virloginit " )
return token
2011-02-16 18:57:50 +03:00
elif token [ 0 ] == " name " :
if self . type == " " :
self . type = token [ 1 ]
else :
self . type = self . type + " " + token [ 1 ]
else :
self . error ( " parsing type %s : expecting a name " % ( self . type ) ,
token )
return token
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and ( token [ 0 ] == " op " or
2011-02-16 18:57:50 +03:00
token [ 0 ] == " name " and token [ 1 ] == " const " ) :
self . type = self . type + " " + token [ 1 ]
token = self . token ( )
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:00 +03:00
#
# if there is a parenthesis here, this means a function type
#
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' ( ' :
2011-02-16 18:57:50 +03:00
self . type = self . type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and token [ 0 ] == " op " and token [ 1 ] == ' * ' :
2011-02-16 18:57:50 +03:00
self . type = self . type + token [ 1 ]
token = self . token ( )
2018-03-20 09:48:46 +03:00
if token is None or token [ 0 ] != " name " :
2013-02-07 11:22:01 +04:00
self . error ( " parsing function type, name expected " , token )
2011-02-16 18:57:50 +03:00
return token
self . type = self . type + token [ 1 ]
nametok = token
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' ) ' :
2011-02-16 18:57:50 +03:00
self . type = self . type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' ( ' :
2011-02-16 18:57:50 +03:00
token = self . token ( )
2013-02-07 11:22:01 +04:00
type = self . type
token = self . parseSignature ( token )
self . type = type
2011-02-16 18:57:50 +03:00
else :
2013-02-07 11:22:01 +04:00
self . error ( " parsing function type, ' ( ' expected " , token )
2011-02-16 18:57:50 +03:00
return token
else :
2013-02-07 11:22:01 +04:00
self . error ( " parsing function type, ' ) ' expected " , token )
2011-02-16 18:57:50 +03:00
return token
self . lexer . push ( token )
token = nametok
return token
2018-03-20 09:49:00 +03:00
#
# do some lookahead for arrays
#
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-02-16 18:57:50 +03:00
nametok = token
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' [ ' :
2011-06-20 07:25:34 +04:00
self . type = self . type + " " + nametok [ 1 ]
2013-08-22 13:16:03 +04:00
while token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' [ ' :
2011-02-16 18:57:50 +03:00
self . type = self . type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and token [ 0 ] != ' sep ' and \
2011-02-16 18:57:50 +03:00
token [ 1 ] != ' ] ' and token [ 1 ] != ' ; ' :
self . type = self . type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == ' sep ' and token [ 1 ] == ' ] ' :
2011-02-16 18:57:50 +03:00
self . type = self . type + token [ 1 ]
token = self . token ( )
else :
2013-02-07 11:22:01 +04:00
self . error ( " parsing array type, ' ] ' expected " , token )
2011-02-16 18:57:50 +03:00
return token
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' : ' :
2018-03-20 09:49:00 +03:00
# remove a bit-field width such as ':12' in case it's a size-limited int
2011-02-16 18:57:50 +03:00
token = self . token ( )
token = self . token ( )
self . lexer . push ( token )
token = nametok
return token
2005-12-01 20:34:21 +03:00
2018-03-20 09:49:00 +03:00
#
# Parse a signature: the '(' has already been consumed; scan the argument
# type definitions up to and including the ')'
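# For a signature such as "(virDomainPtr domain, unsigned int flags)" the
# result stored in self.signature is roughly
# [("virDomainPtr", "domain", None), ("unsigned int", "flags", None)].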
2005-12-01 20:34:21 +03:00
def parseSignature ( self , token ) :
signature = [ ]
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' ) ' :
2011-02-16 18:57:50 +03:00
self . signature = [ ]
token = self . token ( )
return token
2013-08-22 13:16:03 +04:00
while token is not None :
2011-02-16 18:57:50 +03:00
token = self . parseType ( token )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-02-16 18:57:50 +03:00
signature . append ( ( self . type , token [ 1 ] , None ) )
token = self . token ( )
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' , ' :
2011-02-16 18:57:50 +03:00
token = self . token ( )
continue
2013-08-22 13:16:03 +04:00
elif token is not None and token [ 0 ] == " sep " and token [ 1 ] == ' ) ' :
2018-03-20 09:49:00 +03:00
# only the type was provided
2011-02-16 18:57:50 +03:00
if self . type == " ... " :
signature . append ( ( self . type , " ... " , None ) )
else :
signature . append ( ( self . type , None , None ) )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " :
2011-02-16 18:57:50 +03:00
if token [ 1 ] == ' , ' :
token = self . token ( )
continue
elif token [ 1 ] == ' ) ' :
token = self . token ( )
break
self . signature = signature
return token
2005-12-01 20:34:21 +03:00
2011-05-30 16:36:41 +04:00
# This dict contains the functions that are allowed to use [unsigned]
# long in their signature and return type for legacy reasons. This list
# is fixed; new procedures and public APIs have to use [unsigned] long long
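# Each entry maps a function name to a pair: a boolean stating whether
# the return type may be [unsigned] long, and the parameter names that
# may be [unsigned] long. For example virDomainGetMaxMemory may return
# long but takes no long parameters, while virDomainMigrate may only use
# long for its "flags" and "bandwidth" parameters.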
2018-03-20 09:48:46 +03:00
long_legacy_functions = {
" virGetVersion " : ( False , ( " libVer " , " typeVer " ) ) ,
" virConnectGetLibVersion " : ( False , ( " libVer " ) ) ,
" virConnectGetVersion " : ( False , ( " hvVer " ) ) ,
" virDomainGetMaxMemory " : ( True , ( ) ) ,
" virDomainMigrate " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrate2 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateBegin3 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateConfirm3 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateDirect " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateFinish " : ( False , ( " flags " ) ) ,
" virDomainMigrateFinish2 " : ( False , ( " flags " ) ) ,
" virDomainMigrateFinish3 " : ( False , ( " flags " ) ) ,
" virDomainMigratePeer2Peer " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePerform " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePerform3 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePrepare " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePrepare2 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePrepare3 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePrepareTunnel " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigratePrepareTunnel3 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateToURI " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateToURI2 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateVersion1 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateVersion2 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateVersion3 " : ( False , ( " flags " , " bandwidth " ) ) ,
" virDomainMigrateSetMaxSpeed " : ( False , ( " bandwidth " ) ) ,
" virDomainSetMaxMemory " : ( False , ( " memory " ) ) ,
" virDomainSetMemory " : ( False , ( " memory " ) ) ,
" virDomainSetMemoryFlags " : ( False , ( " memory " ) ) ,
" virDomainBlockCommit " : ( False , ( " bandwidth " ) ) ,
" virDomainBlockJobSetSpeed " : ( False , ( " bandwidth " ) ) ,
" virDomainBlockPull " : ( False , ( " bandwidth " ) ) ,
" virDomainBlockRebase " : ( False , ( " bandwidth " ) ) ,
" virDomainMigrateGetMaxSpeed " : ( False , ( " bandwidth " ) )
}
2011-05-30 16:36:41 +04:00
def checkLongLegacyFunction ( self , name , return_type , signature ) :
if " long " in return_type and " long long " not in return_type :
try :
if not CParser . long_legacy_functions [ name ] [ 0 ] :
raise Exception ( )
2019-09-24 15:42:51 +03:00
except Exception :
2011-05-30 16:36:41 +04:00
self . error ( ( " function ' %s ' is not allowed to return long, "
2018-03-20 09:49:07 +03:00
" use long long instead " ) % name )
2011-05-30 16:36:41 +04:00
for param in signature :
if " long " in param [ 0 ] and " long long " not in param [ 0 ] :
try :
if param [ 1 ] not in CParser . long_legacy_functions [ name ] [ 1 ] :
raise Exception ( )
2019-09-24 15:42:51 +03:00
except Exception :
2011-05-30 16:36:41 +04:00
self . error ( ( " function ' %s ' is not allowed to take long "
" parameter ' %s ' , use long long instead " )
% ( name , param [ 1 ] ) )
# This dict contains the structs that are allowed to use [unsigned]
# long for legacy reasons. This list is fixed; new structs have to use
# [unsigned] long long
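# Each entry maps a struct name to the field names that may keep using
# [unsigned] long, e.g. "maxMem" and "memory" in _virDomainInfo.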
2018-03-20 09:48:46 +03:00
long_legacy_struct_fields = {
" _virDomainInfo " : ( " maxMem " , " memory " ) ,
" _virNodeInfo " : ( " memory " ) ,
" _virDomainBlockJobInfo " : ( " bandwidth " )
}
2011-05-30 16:36:41 +04:00
def checkLongLegacyStruct ( self , name , fields ) :
for field in fields :
if " long " in field [ 0 ] and " long long " not in field [ 0 ] :
try :
if field [ 1 ] not in CParser . long_legacy_struct_fields [ name ] :
raise Exception ( )
2019-09-24 15:42:51 +03:00
except Exception :
2011-05-30 16:36:41 +04:00
self . error ( ( " struct ' %s ' is not allowed to contain long "
2018-03-20 09:48:59 +03:00
" field ' %s ' , use long long instead " )
2011-05-30 16:36:41 +04:00
% ( name , field [ 1 ] ) )
2018-03-20 09:49:00 +03:00
#
# Parse a global definition, be it a type, a variable or a function;
# the extern "C" blocks are a bit nasty and require it to recurse.
#
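# For example (hypothetical header content):
#     extern "C" {
#         typedef struct _virFoo virFoo;
#         int virFooBar(virFoo *foo);
#     }
# parseGlobal() is re-entered for every declaration found inside the
# braces until the closing '}' is reached.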
2005-12-01 20:34:21 +03:00
def parseGlobal ( self , token ) :
static = 0
if token [ 1 ] == ' extern ' :
2011-02-16 18:57:50 +03:00
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is None :
2011-02-16 18:57:50 +03:00
return token
if token [ 0 ] == ' string ' :
if token [ 1 ] == ' C ' :
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is None :
2011-02-16 18:57:50 +03:00
return token
if token [ 0 ] == ' sep ' and token [ 1 ] == " { " :
token = self . token ( )
2018-03-15 12:30:03 +03:00
# print('Entering extern "C line ', self.lineno())
2013-08-22 13:16:03 +04:00
while token is not None and ( token [ 0 ] != ' sep ' or
2011-02-16 18:57:50 +03:00
token [ 1 ] != " } " ) :
if token [ 0 ] == ' name ' :
token = self . parseGlobal ( token )
else :
self . error (
" token %s %s unexpected at the top level " % (
token [ 0 ] , token [ 1 ] ) )
token = self . parseGlobal ( token )
2018-03-15 12:30:03 +03:00
# print('Exiting extern "C" line', self.lineno())
2011-02-16 18:57:50 +03:00
token = self . token ( )
return token
else :
return token
elif token [ 1 ] == ' static ' :
static = 1
token = self . token ( )
2018-03-20 09:48:46 +03:00
if token is None or token [ 0 ] != ' name ' :
2011-02-16 18:57:50 +03:00
return token
if token [ 1 ] == ' typedef ' :
token = self . token ( )
return self . parseTypedef ( token )
else :
token = self . parseType ( token )
type_orig = self . type
2013-08-22 13:16:03 +04:00
if token is None or token [ 0 ] != " name " :
2011-02-16 18:57:50 +03:00
return token
type = type_orig
self . name = token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and ( token [ 0 ] == " sep " or token [ 0 ] == " op " ) :
2011-02-16 18:57:50 +03:00
if token [ 0 ] == " sep " :
if token [ 1 ] == " [ " :
type = type + token [ 1 ]
token = self . token ( )
2018-03-20 09:48:59 +03:00
while token is not None and ( token [ 0 ] != " sep " or
token [ 1 ] != " ; " ) :
2011-02-16 18:57:50 +03:00
type = type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " op " and token [ 1 ] == " = " :
2018-03-20 09:49:00 +03:00
#
# Skip the initialization of the variable
#
2011-02-16 18:57:50 +03:00
token = self . token ( )
if token [ 0 ] == ' sep ' and token [ 1 ] == ' { ' :
token = self . token ( )
token = self . parseBlock ( token )
else :
self . comment = None
2018-03-20 09:48:59 +03:00
while token is not None and ( token [ 0 ] != " sep " or
token [ 1 ] not in ' ,; ' ) :
token = self . token ( )
2011-02-16 18:57:50 +03:00
self . comment = None
2013-08-22 13:16:03 +04:00
if token is None or token [ 0 ] != " sep " or ( token [ 1 ] != ' ; ' and
2011-02-16 18:57:50 +03:00
token [ 1 ] != ' , ' ) :
self . error ( " missing ' ; ' or ' , ' after value " )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " sep " :
2011-02-16 18:57:50 +03:00
if token [ 1 ] == " ; " :
self . comment = None
token = self . token ( )
if type == " struct " :
2011-05-30 16:36:41 +04:00
self . checkLongLegacyStruct ( self . name , self . struct_fields )
2011-02-16 18:57:50 +03:00
self . index_add ( self . name , self . filename ,
not self . is_header , " struct " , self . struct_fields )
else :
self . index_add ( self . name , self . filename ,
not self . is_header , " variable " , type )
break
elif token [ 1 ] == " ( " :
token = self . token ( )
token = self . parseSignature ( token )
2013-08-22 13:16:03 +04:00
if token is None :
2011-02-16 18:57:50 +03:00
return None
if token [ 0 ] == " sep " and token [ 1 ] == " ; " :
2011-05-30 16:36:41 +04:00
self . checkLongLegacyFunction ( self . name , type , self . signature )
2011-02-16 18:57:50 +03:00
d = self . mergeFunctionComment ( self . name ,
( ( type , None ) , self . signature ) , 1 )
self . index_add ( self . name , self . filename , static ,
" function " , d )
token = self . token ( )
elif token [ 0 ] == " sep " and token [ 1 ] == " { " :
2011-05-30 16:36:41 +04:00
self . checkLongLegacyFunction ( self . name , type , self . signature )
2011-02-16 18:57:50 +03:00
d = self . mergeFunctionComment ( self . name ,
( ( type , None ) , self . signature ) , static )
self . index_add ( self . name , self . filename , static ,
" function " , d )
token = self . token ( )
2013-02-07 11:22:01 +04:00
token = self . parseBlock ( token )
2011-02-16 18:57:50 +03:00
elif token [ 1 ] == ' , ' :
self . comment = None
self . index_add ( self . name , self . filename , static ,
" variable " , type )
type = type_orig
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None and token [ 0 ] == " sep " :
2011-02-16 18:57:50 +03:00
type = type + token [ 1 ]
token = self . token ( )
2013-08-22 13:16:03 +04:00
if token is not None and token [ 0 ] == " name " :
2011-02-16 18:57:50 +03:00
self . name = token [ 1 ]
token = self . token ( )
else :
break
return token
2005-12-01 20:34:21 +03:00
def parse ( self ) :
2011-05-12 14:19:42 +04:00
if not quiet :
2018-03-15 12:30:03 +03:00
print ( " Parsing %s " % ( self . filename ) )
2005-12-01 20:34:21 +03:00
token = self . token ( )
2013-08-22 13:16:03 +04:00
while token is not None :
2005-12-01 20:34:21 +03:00
if token [ 0 ] == ' name ' :
2011-02-16 18:57:50 +03:00
token = self . parseGlobal ( token )
2005-12-01 20:34:21 +03:00
else :
2011-02-16 18:57:50 +03:00
self . error ( " token %s %s unexpected at the top level " % (
token [ 0 ] , token [ 1 ] ) )
token = self . parseGlobal ( token )
return
self . parseTopComment ( self . top_comment )
2005-12-01 20:34:21 +03:00
return self . index
2008-02-05 22:27:37 +03:00
2005-12-01 20:34:21 +03:00
class docBuilder :
""" A documentation builder """
2011-02-16 17:09:09 +03:00
def __init__ ( self , name , path = ' . ' , directories = [ ' . ' ] , includes = [ ] ) :
2005-12-01 20:34:21 +03:00
self . name = name
2011-02-16 17:09:09 +03:00
self . path = path
2005-12-01 20:34:21 +03:00
self . directories = directories
2011-09-09 14:55:21 +04:00
if name == " libvirt " :
2018-03-15 12:54:07 +03:00
self . includes = includes + list ( included_files . keys ( ) )
2011-09-09 14:55:21 +04:00
elif name == " libvirt-qemu " :
2018-03-15 12:54:07 +03:00
self . includes = includes + list ( qemu_included_files . keys ( ) )
2012-12-21 17:15:19 +04:00
elif name == " libvirt-lxc " :
2018-03-15 12:54:07 +03:00
self . includes = includes + list ( lxc_included_files . keys ( ) )
2015-04-15 17:23:25 +03:00
elif name == " libvirt-admin " :
2018-03-15 12:54:07 +03:00
self . includes = includes + list ( admin_included_files . keys ( ) )
2011-02-16 18:57:50 +03:00
self . modules = { }
self . headers = { }
self . idx = index ( )
2005-12-01 20:34:21 +03:00
self . xref = { }
2011-02-16 18:57:50 +03:00
self . index = { }
self . basename = name
2013-01-29 18:35:28 +04:00
self . errors = 0
2005-12-01 20:34:21 +03:00
2012-07-27 17:03:03 +04:00
def warning ( self , msg ) :
global warnings
warnings = warnings + 1
2018-03-15 12:30:03 +03:00
print ( msg )
2012-07-27 17:03:03 +04:00
2013-01-29 18:35:28 +04:00
def error ( self , msg ) :
self . errors += 1
2018-03-15 12:30:03 +03:00
print ( " Error: " , msg , file = sys . stderr )
2013-01-29 18:35:28 +04:00
2005-12-01 20:34:21 +03:00
def indexString ( self , id , str ) :
2013-08-22 13:16:03 +04:00
if str is None :
2011-02-16 18:57:50 +03:00
return
2018-03-15 12:42:44 +03:00
str = str . replace ( " ' " , ' ' )
str = str . replace ( ' " ' , ' ' )
str = str . replace ( " / " , ' ' )
str = str . replace ( ' * ' , ' ' )
str = str . replace ( " [ " , ' ' )
str = str . replace ( " ] " , ' ' )
str = str . replace ( " ( " , ' ' )
str = str . replace ( " ) " , ' ' )
str = str . replace ( " < " , ' ' )
str = str . replace ( ' > ' , ' ' )
str = str . replace ( " & " , ' ' )
str = str . replace ( ' # ' , ' ' )
str = str . replace ( " , " , ' ' )
str = str . replace ( ' . ' , ' ' )
str = str . replace ( ' ; ' , ' ' )
tokens = str . split ( )
2011-02-16 18:57:50 +03:00
for token in tokens :
2018-03-16 20:49:58 +03:00
c = token [ 0 ]
if not re . match ( r " [a-zA-Z] " , c ) :
pass
elif len ( token ) < 3 :
pass
else :
lower = token . lower ( )
# TODO: generalize this a bit
if lower == ' and ' or lower == ' the ' :
2011-02-16 18:57:50 +03:00
pass
2018-03-16 20:49:58 +03:00
elif token in self . xref :
self . xref [ token ] . append ( id )
2011-02-16 18:57:50 +03:00
else :
2018-03-16 20:49:58 +03:00
self . xref [ token ] = [ id ]
2005-12-01 20:34:21 +03:00
def analyze ( self ) :
2011-05-12 14:19:42 +04:00
if not quiet :
2018-03-15 12:30:03 +03:00
print ( " Project %s : %d headers, %d modules " % ( self . name , len ( self . headers . keys ( ) ) , len ( self . modules . keys ( ) ) ) )
2011-02-16 18:57:50 +03:00
self . idx . analyze ( )
2005-12-01 20:34:21 +03:00
def scanHeaders ( self ) :
2011-02-16 18:57:50 +03:00
for header in self . headers . keys ( ) :
parser = CParser ( header )
idx = parser . parse ( )
2013-02-07 11:22:01 +04:00
self . headers [ header ] = idx
2011-02-16 18:57:50 +03:00
self . idx . merge ( idx )
2005-12-01 20:34:21 +03:00
def scanModules ( self ) :
2011-02-16 18:57:50 +03:00
for module in self . modules . keys ( ) :
parser = CParser ( module )
idx = parser . parse ( )
# idx.analyze()
self . modules [ module ] = idx
self . idx . merge_public ( idx )
2005-12-01 20:34:21 +03:00
def scan ( self ) :
for directory in self . directories :
2011-02-16 18:57:50 +03:00
files = glob . glob ( directory + " /*.c " )
for file in files :
skip = 1
for incl in self . includes :
2018-03-15 12:42:44 +03:00
if file . find ( incl ) != - 1 :
2013-02-07 11:22:01 +04:00
skip = 0
2011-02-16 18:57:50 +03:00
break
if skip == 0 :
2013-02-07 11:22:01 +04:00
self . modules [ file ] = None
2011-02-16 18:57:50 +03:00
files = glob . glob ( directory + " /*.h " )
for file in files :
skip = 1
for incl in self . includes :
2018-03-15 12:42:44 +03:00
if file . find ( incl ) != - 1 :
2013-02-07 11:22:01 +04:00
skip = 0
2011-02-16 18:57:50 +03:00
break
if skip == 0 :
2013-02-07 11:22:01 +04:00
self . headers [ file ] = None
2011-02-16 18:57:50 +03:00
self . scanHeaders ( )
self . scanModules ( )
2008-02-05 22:27:37 +03:00
2005-12-01 20:34:21 +03:00
def modulename_file ( self , file ) :
module = os . path . basename ( file )
2011-02-16 18:57:50 +03:00
if module [ - 2 : ] == ' .h ' :
module = module [ : - 2 ]
elif module [ - 2 : ] == ' .c ' :
module = module [ : - 2 ]
return module
2005-12-01 20:34:21 +03:00
def serialize_enum ( self , output , name ) :
id = self . idx . enums [ name ]
output . write ( " <enum name= ' %s ' file= ' %s ' " % ( name ,
2011-02-16 18:57:50 +03:00
self . modulename_file ( id . header ) ) )
2013-08-22 13:16:03 +04:00
if id . info is not None :
2011-02-16 18:57:50 +03:00
info = id . info
2019-01-24 14:23:15 +03:00
valhex = " "
2013-08-22 13:16:03 +04:00
if info [ 0 ] is not None and info [ 0 ] != ' ' :
2011-02-16 18:57:50 +03:00
try :
val = eval ( info [ 0 ] )
2019-01-24 14:23:15 +03:00
valhex = hex ( val )
2019-09-24 15:42:51 +03:00
except Exception :
2011-02-16 18:57:50 +03:00
val = info [ 0 ]
2013-02-07 11:22:01 +04:00
output . write ( " value= ' %s ' " % ( val ) )
2019-01-24 14:23:15 +03:00
if valhex != " " :
output . write ( " value_hex= ' %s ' " % ( valhex ) )
2019-09-24 15:29:27 +03:00
m = re . match ( r " \ (?1<<( \ d+) \ )? " , info [ 0 ] )
2019-01-24 14:23:15 +03:00
if m :
output . write ( " value_bitshift= ' %s ' " % ( m . group ( 1 ) ) )
2013-08-22 13:16:03 +04:00
if info [ 2 ] is not None and info [ 2 ] != ' ' :
2013-02-07 11:22:01 +04:00
output . write ( " type= ' %s ' " % info [ 2 ] )
2013-08-22 13:16:03 +04:00
if info [ 1 ] is not None and info [ 1 ] != ' ' :
2013-02-07 11:22:01 +04:00
output . write ( " info= ' %s ' " % escape ( info [ 1 ] ) )
2005-12-01 20:34:21 +03:00
output . write ( " /> \n " )
def serialize_macro ( self , output , name ) :
id = self . idx . macros [ name ]
2015-06-05 12:48:59 +03:00
output . write ( " <macro name= ' %s ' file= ' %s ' " % ( name ,
2011-02-16 18:57:50 +03:00
self . modulename_file ( id . header ) ) )
2015-06-05 12:48:59 +03:00
if id . info is None :
args = [ ]
desc = None
strValue = None
else :
( args , desc , strValue ) = id . info
if strValue is not None :
output . write ( " string= ' %s ' " % strValue )
output . write ( " > \n " )
if desc is not None and desc != " " :
output . write ( " <info><![CDATA[ %s ]]></info> \n " % ( desc ) )
self . indexString ( name , desc )
for arg in args :
( name , desc ) = arg
if desc is not None and desc != " " :
output . write ( " <arg name= ' %s ' info= ' %s ' /> \n " % (
name , escape ( desc ) ) )
self . indexString ( name , desc )
else :
2018-03-20 09:49:07 +03:00
output . write ( " <arg name= ' %s ' /> \n " % name )
2005-12-01 20:34:21 +03:00
output . write ( " </macro> \n " )
2011-06-20 07:25:34 +04:00
def serialize_union ( self , output , field , desc ) :
2018-03-20 09:48:46 +03:00
output . write ( " <field name= ' %s ' type= ' union ' info= ' %s ' > \n " % ( field [ 1 ] , desc ) )
2011-06-20 07:25:34 +04:00
output . write ( " <union> \n " )
for f in field [ 3 ] :
desc = f [ 2 ]
2013-08-22 13:16:03 +04:00
if desc is None :
2011-06-20 07:25:34 +04:00
desc = ' '
else :
desc = escape ( desc )
2018-03-20 09:48:46 +03:00
output . write ( " <field name= ' %s ' type= ' %s ' info= ' %s ' /> \n " % ( f [ 1 ] , f [ 0 ] , desc ) )
2011-06-20 07:25:34 +04:00
output . write ( " </union> \n " )
output . write ( " </field> \n " )
2005-12-01 20:34:21 +03:00
def serialize_typedef ( self , output , name ) :
id = self . idx . typedefs [ name ]
2011-02-16 18:57:50 +03:00
if id . info [ 0 : 7 ] == ' struct ' :
output . write ( " <struct name= ' %s ' file= ' %s ' type= ' %s ' " % (
name , self . modulename_file ( id . header ) , id . info ) )
name = id . info [ 7 : ]
2018-03-20 09:48:44 +03:00
if ( name in self . idx . structs and
isinstance ( self . idx . structs [ name ] . info , ( list , tuple ) ) ) :
2013-02-07 11:22:01 +04:00
output . write ( " > \n " )
2011-02-16 18:57:50 +03:00
try :
for field in self . idx . structs [ name ] . info :
desc = field [ 2 ]
self . indexString ( name , desc )
2013-08-22 13:16:03 +04:00
if desc is None :
2011-02-16 18:57:50 +03:00
desc = ' '
else :
desc = escape ( desc )
2011-06-20 07:25:34 +04:00
if field [ 0 ] == " union " :
self . serialize_union ( output , field , desc )
else :
2018-03-20 09:48:46 +03:00
output . write ( " <field name= ' %s ' type= ' %s ' info= ' %s ' /> \n " % ( field [ 1 ] , field [ 0 ] , desc ) )
2019-09-24 15:42:51 +03:00
except Exception :
2018-03-20 09:49:07 +03:00
self . warning ( " Failed to serialize struct %s " % name )
2011-02-16 18:57:50 +03:00
output . write ( " </struct> \n " )
else :
2013-02-07 11:22:01 +04:00
output . write ( " /> \n " )
2018-03-20 09:48:46 +03:00
else :
2011-02-16 18:57:50 +03:00
output . write ( " <typedef name= ' %s ' file= ' %s ' type= ' %s ' " % (
name , self . modulename_file ( id . header ) , id . info ) )
2005-12-01 20:34:21 +03:00
try :
2011-02-16 18:57:50 +03:00
desc = id . extra
2013-08-22 13:16:03 +04:00
if desc is not None and desc != " " :
2011-02-16 18:57:50 +03:00
output . write ( " > \n <info><![CDATA[ %s ]]></info> \n " % ( desc ) )
output . write ( " </typedef> \n " )
else :
output . write ( " /> \n " )
2019-09-24 15:42:51 +03:00
except Exception :
2011-02-16 18:57:50 +03:00
output . write ( " /> \n " )
2005-12-01 20:34:21 +03:00
def serialize_variable ( self , output , name ) :
id = self . idx . variables [ name ]
2013-08-22 13:16:03 +04:00
if id . info is not None :
2011-02-16 18:57:50 +03:00
output . write ( " <variable name= ' %s ' file= ' %s ' type= ' %s ' /> \n " % (
name , self . modulename_file ( id . header ) , id . info ) )
else :
output . write ( " <variable name= ' %s ' file= ' %s ' /> \n " % (
name , self . modulename_file ( id . header ) ) )
2008-02-05 22:27:37 +03:00
2005-12-01 20:34:21 +03:00
def serialize_function ( self , output , name ) :
id = self . idx . functions [ name ]
2011-05-12 14:19:42 +04:00
if name == debugsym and not quiet :
2018-03-15 12:30:03 +03:00
print ( " => " , id )
2005-12-01 20:34:21 +03:00
2016-06-28 14:28:48 +03:00
# NB: this is consumed by a regex in 'getAPIFilenames' in hvsupport.pl
2005-12-01 20:34:21 +03:00
output . write ( " < %s name= ' %s ' file= ' %s ' module= ' %s ' > \n " % ( id . type ,
2011-02-16 18:57:50 +03:00
name , self . modulename_file ( id . header ) ,
self . modulename_file ( id . module ) ) )
#
# Processing of conditionals modified by Bill 1/1/05
#
2013-08-22 13:16:03 +04:00
if id . conditionals is not None :
2011-02-16 18:57:50 +03:00
apstr = " "
for cond in id . conditionals :
if apstr != " " :
apstr = apstr + " && "
apstr = apstr + cond
2018-03-20 09:48:47 +03:00
output . write ( " <cond> %s </cond> \n " % ( apstr ) )
2011-02-16 18:57:50 +03:00
try :
( ret , params , desc ) = id . info
output . write ( " <info><![CDATA[ %s ]]></info> \n " % ( desc ) )
self . indexString ( name , desc )
2013-08-22 13:16:03 +04:00
if ret [ 0 ] is not None :
2011-02-16 18:57:50 +03:00
if ret [ 0 ] == " void " :
output . write ( " <return type= ' void ' /> \n " )
2018-03-15 12:39:49 +03:00
elif ( ret [ 1 ] is None or ret [ 1 ] == ' ' ) and name not in ignored_functions :
2013-01-29 18:35:28 +04:00
self . error ( " Missing documentation for return of function ` %s ' " % name )
2011-02-16 18:57:50 +03:00
else :
output . write ( " <return type= ' %s ' info= ' %s ' /> \n " % (
ret [ 0 ] , escape ( ret [ 1 ] ) ) )
self . indexString ( name , ret [ 1 ] )
for param in params :
if param [ 0 ] == ' void ' :
continue
2013-08-22 13:16:03 +04:00
if ( param [ 2 ] is None or param [ 2 ] == ' ' ) :
2018-03-15 12:39:49 +03:00
if name in ignored_functions :
2013-01-29 18:35:28 +04:00
output . write ( " <arg name= ' %s ' type= ' %s ' info= ' ' /> \n " % ( param [ 1 ] , param [ 0 ] ) )
else :
self . error ( " Missing documentation for arg ` %s ' of function ` %s ' " % ( param [ 1 ] , name ) )
2011-02-16 18:57:50 +03:00
else :
output . write ( " <arg name= ' %s ' type= ' %s ' info= ' %s ' /> \n " % ( param [ 1 ] , param [ 0 ] , escape ( param [ 2 ] ) ) )
self . indexString ( name , param [ 2 ] )
2019-09-24 15:42:51 +03:00
except Exception :
2018-03-15 12:30:03 +03:00
print ( " Exception: " , sys . exc_info ( ) [ 1 ] , file = sys . stderr )
2018-03-15 12:36:06 +03:00
self . warning ( " Failed to save function %s info: %s " % ( name , repr ( id . info ) ) )
2005-12-01 20:34:21 +03:00
output . write ( " </ %s > \n " % ( id . type ) )
def serialize_exports ( self , output , file ) :
module = self . modulename_file ( file )
2011-02-16 18:57:50 +03:00
output . write ( " <file name= ' %s ' > \n " % ( module ) )
dict = self . headers [ file ]
2013-08-22 13:16:03 +04:00
if dict . info is not None :
2018-12-13 14:23:42 +03:00
for data in ( ' Summary ' , ' Description ' ) :
2011-02-16 18:57:50 +03:00
try :
output . write ( " < %s > %s </ %s > \n " % (
2018-03-16 20:47:36 +03:00
data . lower ( ) ,
2011-02-16 18:57:50 +03:00
escape ( dict . info [ data ] ) ,
2018-03-16 20:47:36 +03:00
data . lower ( ) ) )
except KeyError :
2011-05-12 14:19:42 +04:00
self . warning ( " Header %s lacks a %s description " % ( module , data ) )
2018-03-15 12:39:49 +03:00
if ' Description ' in dict . info :
2011-02-16 18:57:50 +03:00
desc = dict . info [ ' Description ' ]
2018-03-15 12:42:44 +03:00
if desc . find ( " DEPRECATED " ) != - 1 :
2011-02-16 18:57:50 +03:00
output . write ( " <deprecated/> \n " )
2005-12-01 20:34:21 +03:00
2018-03-20 09:48:57 +03:00
for id in uniq ( dict . macros . keys ( ) ) :
2011-02-16 18:57:50 +03:00
# Macros are sometimes used to masquerade as other types.
2018-03-15 12:39:49 +03:00
if id in dict . functions :
2011-02-16 18:57:50 +03:00
continue
2018-03-15 12:39:49 +03:00
if id in dict . variables :
2011-02-16 18:57:50 +03:00
continue
2018-03-15 12:39:49 +03:00
if id in dict . typedefs :
2011-02-16 18:57:50 +03:00
continue
2018-03-15 12:39:49 +03:00
if id in dict . structs :
2011-02-16 18:57:50 +03:00
continue
2018-03-15 12:39:49 +03:00
if id in dict . unions :
2011-06-20 07:25:34 +04:00
continue
2018-03-15 12:39:49 +03:00
if id in dict . enums :
2011-02-16 18:57:50 +03:00
continue
output . write ( " <exports symbol= ' %s ' type= ' macro ' /> \n " % ( id ) )
2018-03-20 09:48:57 +03:00
for id in uniq ( dict . enums . keys ( ) ) :
2011-02-16 18:57:50 +03:00
output . write ( " <exports symbol= ' %s ' type= ' enum ' /> \n " % ( id ) )
2018-03-20 09:48:57 +03:00
for id in uniq ( dict . typedefs . keys ( ) ) :
2011-02-16 18:57:50 +03:00
output . write ( " <exports symbol= ' %s ' type= ' typedef ' /> \n " % ( id ) )
2018-03-20 09:48:57 +03:00
for id in uniq ( dict . structs . keys ( ) ) :
2011-02-16 18:57:50 +03:00
output . write ( " <exports symbol= ' %s ' type= ' struct ' /> \n " % ( id ) )
2018-03-20 09:48:57 +03:00
for id in uniq ( dict . variables . keys ( ) ) :
2011-02-16 18:57:50 +03:00
output . write ( " <exports symbol= ' %s ' type= ' variable ' /> \n " % ( id ) )
2018-03-20 09:48:57 +03:00
for id in uniq ( dict . functions . keys ( ) ) :
2011-02-16 18:57:50 +03:00
output . write ( " <exports symbol= ' %s ' type= ' function ' /> \n " % ( id ) )
output . write ( " </file> \n " )
2005-12-01 20:34:21 +03:00
def serialize_xrefs_files ( self , output ) :
2018-03-15 12:51:57 +03:00
headers = sorted ( self . headers . keys ( ) )
2005-12-01 20:34:21 +03:00
for file in headers :
2011-02-16 18:57:50 +03:00
module = self . modulename_file ( file )
output . write ( " <file name= ' %s ' > \n " % ( module ) )
dict = self . headers [ file ]
2018-03-20 09:48:59 +03:00
ids = uniq ( list ( dict . functions . keys ( ) ) +
list ( dict . variables . keys ( ) ) +
list ( dict . macros . keys ( ) ) +
list ( dict . typedefs . keys ( ) ) +
list ( dict . structs . keys ( ) ) +
2018-03-15 12:54:07 +03:00
list ( dict . enums . keys ( ) ) )
2011-02-16 18:57:50 +03:00
for id in ids :
output . write ( " <ref name= ' %s ' /> \n " % ( id ) )
output . write ( " </file> \n " )
2005-12-01 20:34:21 +03:00
pass
def serialize_xrefs_functions ( self , output ) :
funcs = { }
2011-02-16 18:57:50 +03:00
for name in self . idx . functions . keys ( ) :
id = self . idx . functions [ name ]
try :
( ret , params , desc ) = id . info
for param in params :
if param [ 0 ] == ' void ' :
continue
2018-03-15 12:39:49 +03:00
if param [ 0 ] in funcs :
2011-02-16 18:57:50 +03:00
funcs [ param [ 0 ] ] . append ( name )
else :
funcs [ param [ 0 ] ] = [ name ]
2019-09-24 15:42:51 +03:00
except Exception :
2011-02-16 18:57:50 +03:00
pass
2018-03-15 12:51:57 +03:00
typ = sorted ( funcs . keys ( ) )
2011-02-16 18:57:50 +03:00
for type in typ :
2018-03-20 09:48:53 +03:00
if type in [ ' ' , " void " , " int " , " char * " , " const char * " ] :
2011-02-16 18:57:50 +03:00
continue
output . write ( " <type name= ' %s ' > \n " % ( type ) )
ids = funcs [ type ]
ids . sort ( )
pid = ' ' # not sure why we have dups, but get rid of them!
for id in ids :
if id != pid :
output . write ( " <ref name= ' %s ' /> \n " % ( id ) )
pid = id
output . write ( " </type> \n " )
    def serialize_xrefs_constructors(self, output):
        funcs = {}
        for name in self.idx.functions.keys():
            id = self.idx.functions[name]
            try:
                (ret, params, desc) = id.info
                if ret[0] == "void":
                    continue
                if ret[0] in funcs:
                    funcs[ret[0]].append(name)
                else:
                    funcs[ret[0]] = [name]
            except Exception:
                pass
        typ = sorted(funcs.keys())
        for type in typ:
            if type in ['', "void", "int", "char *", "const char *"]:
                continue
            output.write("    <type name='%s'>\n" % (type))
            ids = sorted(funcs[type])
            for id in ids:
                output.write("      <ref name='%s'/>\n" % (id))
            output.write("    </type>\n")
    def serialize_xrefs_alpha(self, output):
        letter = None
        ids = sorted(self.idx.identifiers.keys())
        for id in ids:
            if id[0] != letter:
                if letter is not None:
                    output.write("    </letter>\n")
                letter = id[0]
                output.write("    <letter name='%s'>\n" % (letter))
            output.write("      <ref name='%s'/>\n" % (id))
        if letter is not None:
            output.write("    </letter>\n")
    def serialize_xrefs_references(self, output):
        typ = sorted(self.idx.identifiers.keys())
        for id in typ:
            idf = self.idx.identifiers[id]
            module = idf.header
            output.write("    <reference name='%s' href='%s'/>\n" % (id,
                         'html/' + self.basename + '-' +
                         self.modulename_file(module) + '.html#' + id))
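
    # Word index built from self.xref: words referenced by more than 30
    # symbols are dropped, and the output is split into <chunk> elements of
    # roughly 200 references each so the index pages stay small; a trailing
    # <chunks> table records the first and last letter of every chunk.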
    def serialize_xrefs_index(self, output):
        index = self.xref
        typ = sorted(index.keys())
        letter = None
        count = 0
        chunk = 0
        chunks = []
        first_letter = None
        for id in typ:
            if len(index[id]) > 30:
                continue
            if id[0] != letter:
                if letter is None or count > 200:
                    if letter is not None:
                        output.write("      </letter>\n")
                        output.write("    </chunk>\n")
                        count = 0
                        chunks.append(["chunk%s" % (chunk - 1),
                                       first_letter, letter])
                    output.write("    <chunk name='chunk%s'>\n" % (chunk))
                    first_letter = id[0]
                    chunk = chunk + 1
                elif letter is not None:
                    output.write("      </letter>\n")
                letter = id[0]
                output.write("      <letter name='%s'>\n" % (letter))
            output.write("        <word name='%s'>\n" % (id))
            tokens = index[id]
            tokens.sort()
            tok = None
            for token in tokens:
                if tok == token:
                    continue
                tok = token
                output.write("          <ref name='%s'/>\n" % (token))
                count = count + 1
            output.write("        </word>\n")
        if letter is not None:
            output.write("      </letter>\n")
            output.write("    </chunk>\n")
            if count != 0:
                chunks.append(["chunk%s" % (chunk - 1),
                               first_letter, letter])
            output.write("    <chunks>\n")
            for ch in chunks:
                output.write("      <chunk name='%s' start='%s' end='%s'/>\n" % (
                             ch[0], ch[1], ch[2]))
            output.write("    </chunks>\n")
    def serialize_xrefs(self, output):
        output.write("  <references>\n")
        self.serialize_xrefs_references(output)
        output.write("  </references>\n")
        output.write("  <alpha>\n")
        self.serialize_xrefs_alpha(output)
        output.write("  </alpha>\n")
        output.write("  <constructors>\n")
        self.serialize_xrefs_constructors(output)
        output.write("  </constructors>\n")
        output.write("  <functions>\n")
        self.serialize_xrefs_functions(output)
        output.write("  </functions>\n")
        output.write("  <files>\n")
        self.serialize_xrefs_files(output)
        output.write("  </files>\n")
        output.write("  <index>\n")
        self.serialize_xrefs_index(output)
        output.write("  </index>\n")
    def serialize(self):
        filename = "%s/%s-api.xml" % (self.path, self.name)
        if not quiet:
            print("Saving XML description %s" % (filename))
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<api name='%s'>\n" % self.name)
        output.write("  <files>\n")
        headers = sorted(self.headers.keys())
        for file in headers:
            self.serialize_exports(output, file)
        output.write("  </files>\n")
        output.write("  <symbols>\n")
        macros = sorted(self.idx.macros.keys())
        for macro in macros:
            self.serialize_macro(output, macro)
        enums = sorted(self.idx.enums.keys())
        for enum in enums:
            self.serialize_enum(output, enum)
        typedefs = sorted(self.idx.typedefs.keys())
        for typedef in typedefs:
            self.serialize_typedef(output, typedef)
        variables = sorted(self.idx.variables.keys())
        for variable in variables:
            self.serialize_variable(output, variable)
        functions = sorted(self.idx.functions.keys())
        for function in functions:
            self.serialize_function(output, function)
        output.write("  </symbols>\n")
        output.write("</api>\n")
        output.close()

        if self.errors > 0:
            print("apibuild.py: %d error(s) encountered during generation" %
                  self.errors, file=sys.stderr)
            sys.exit(3)

        filename = "%s/%s-refs.xml" % (self.path, self.name)
        if not quiet:
            print("Saving XML Cross References %s" % (filename))
        output = open(filename, "w")
        output.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n')
        output.write("<apirefs name='%s'>\n" % self.name)
        self.serialize_xrefs(output)
        output.write("</apirefs>\n")
        output.close()
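
# Driver class: rebuild() regenerates the API XML for one of the known
# modules and expects the 'srcdir' and 'builddir' environment variables to
# point at the source and build trees; parse() runs the C parser on a single
# file, for debugging.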
class app:
    def warning(self, msg):
        global warnings
        warnings = warnings + 1
        print(msg)

    def rebuild(self, name):
        if name not in ["libvirt", "libvirt-qemu", "libvirt-lxc", "libvirt-admin"]:
            self.warning("rebuild() failed, unknown module %s" % name)
            return None
        builder = None
        srcdir = os.path.abspath(os.environ["srcdir"])
        builddir = os.path.abspath(os.environ["builddir"])
        if glob.glob(srcdir + "/../src/libvirt.c") != []:
            if not quiet:
                print("Rebuilding API description for %s" % name)
            dirs = [srcdir + "/../src",
                    srcdir + "/../src/admin",
                    srcdir + "/../src/util",
                    srcdir + "/../include/libvirt",
                    builddir + "/../include/libvirt"]
            builder = docBuilder(name, builddir, dirs, [])
        else:
            self.warning("rebuild() failed, unable to guess the module")
            return None
        builder.scan()
        builder.analyze()
        builder.serialize()
        return builder

    #
    # for debugging the parser
    #
    def parse(self, filename):
        parser = CParser(filename)
        idx = parser.parse()
        return idx
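
# Typical invocations (illustrative only; actual paths depend on the build setup):
#   srcdir=. builddir=. python apibuild.py            # rebuild all modules
#   python apibuild.py include/libvirt/libvirt.h      # debug-parse a single file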
if __name__ == "__main__":
    app = app()
    if len(sys.argv) > 1:
        debug = True
        app.parse(sys.argv[1])
    else:
        app.rebuild("libvirt")
        app.rebuild("libvirt-qemu")
        app.rebuild("libvirt-lxc")
        app.rebuild("libvirt-admin")
    if warnings > 0:
        sys.exit(2)
    else:
        sys.exit(0)