if member.has_inbody_description():
reporter.doc_note(member, "has in-body comments, which are ignored")
def check_cycles(graph, reporter):
    """Check cyclic dependencies in a dependency graph.

    The graph parameter provides the graph to check. It should be an object
    that has three methods:
      iternodes():
        Return the list of nodes in the graph.
      iteredges(node):
        Return the list of edges from a given node.
        The list should contain (node, edge) pairs, where node is an object
        returned by iternodes() and edge is any object.
      report_cycle(cycle, reporter):
        Process a found cycle. cycle contains a list of (node, edge) pairs
        that describe the cycle. edge is the edge object that leads _to_
        the node in the cycle.

    This is implemented using an extended DFS-based strongly connected
    component (SCC) search, written using a stack instead of recursion.
    The base algorithm is Tarjan's SCC search:
      http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm

    Each back edge that is encountered during the search is reported as a
    cycle. Additionally, if a cross edge is encountered that is within the
    current SCC, the target node and all its children in the current SCC will
    be visited again to find all cycles. All steps except cycle detection are
    omitted for such re-traversal.

    To avoid duplicates from cycles that do not include all nodes in an SCC,
    a cycle is only reported if the target of the back edge is still active
    in the search, i.e., all edges from it have not yet been traversed.
    """
    # The DFS stack; next node is always popped from the end.
    # Stores (node, edge) pairs.
    # edge is None for start nodes and for post-order processing.
    dfsstack = []
    for node in graph.iternodes():
        dfsstack.append((node, None))
    # Stack of visited nodes that have not yet been assigned to a strongly
    # connected component.
    visitstack = []
    # List of nodes in the DFS recursion stack, as (node, edge) pairs.
    currlist = []
    # Set of nodes in currlist for more efficient searching.
    currset = set()
    # Counter for initializing preorder.
    visit_count = 0
    # DFS pre-order for nodes: initialized when a node is first encountered
    # in the search.
    preorder = dict()
    # Lowest pre-order index reachable from this node.
    # Initialized to pre-order, and updated during post-order processing.
    linkorder = dict()
    # Set to True for a node when first encountered, and set to False when
    # a strongly connected component has been processed.
    in_progress = dict()
    # The DFS search
    while dfsstack:
        currnode, curredge = dfsstack.pop()
        # curredge is None if this is a start node or post-order traversal.
        # currlist is empty if this is a start node.
        if curredge is None and currlist:
            # All children visited: post-order processing.
            done = currlist.pop()[0]
            assert done == currnode
            currset.remove(currnode)
            # If this is the first time this node is encountered, fill
            # linkorder and check for strongly connected components.
            if linkorder[currnode] == preorder[currnode]:
                children = [x for x, dummy in graph.iteredges(currnode)
                            if in_progress[x]]
                if children:
                    linkorder[currnode] = min(linkorder[x] for x in children)
                if preorder[currnode] <= linkorder[currnode]:
                    # This is a root of a strongly connected component.
                    while visitstack:
                        node = visitstack.pop()
                        in_progress[node] = False
                        if node == currnode:
                            break
                    else:
                        assert False
            continue
        if currnode not in preorder:
            # First encounter of this node: pre-order processing.
            preorder[currnode] = visit_count
            linkorder[currnode] = visit_count
            visitstack.append(currnode)
            visit_count += 1
            in_progress[currnode] = True
        elif not in_progress[currnode]:
            # Do not enter processed components again.
            continue
        currlist.append((currnode, curredge))
        currset.add(currnode)
        # Add an entry for post-order traversal.
        dfsstack.append((currnode, None))
        for nextnode, edge in graph.iteredges(currnode):
            if nextnode not in preorder:
                # Not seen previously: push
                dfsstack.append((nextnode, edge))
            else:
                # If an already visited node is in the same component, it is
                # either part of a cycle, or we need to traverse it again to
                # find all cycles.
                if in_progress[nextnode]:
                    if nextnode not in currset:
                        dfsstack.append((nextnode, edge))
                    # Only report cycles to nodes that haven't been processed
                    # yet to avoid duplicates.
                    elif linkorder[nextnode] == preorder[nextnode]:
                        # enumerate (instead of Python-2-only xrange over
                        # indices) keeps this working on both Python 2 and 3.
                        for index, entry in enumerate(currlist):
                            if entry[0] == nextnode:
                                cycle = [(nextnode, edge)]
                                cycle.extend(currlist[index+1:])
                                graph.report_cycle(cycle, reporter)
                                break
                        else:
                            assert False
+
class ModuleDependencyGraph(object):

    """Module dependency graph representation for check_cycles().

    In the reported graph, the nodes are gmxtree.Module objects and the edges
    are gmxtree.ModuleDependency objects.
    """

    def __init__(self, tree):
        self._tree = tree

    def iternodes(self):
        """Yield all modules in the tree, skipping the testutils module."""
        for module in self._tree.get_modules():
            if module.get_name() != 'module_testutils':
                yield module

    def iteredges(self, module):
        """Yield (target module, dependency) pairs for the given module."""
        for dependency in module.get_dependencies():
            othermodule = dependency.get_other_module()
            if othermodule.get_name() != 'module_testutils':
                yield (othermodule, dependency)

    def report_cycle(self, cycle, reporter):
        """Report a module-level dependency cycle unless it is suppressed."""
        if any(edge.is_cycle_suppressed() for node, edge in cycle):
            # TODO: Report unused suppressions.
            return
        modulelist = ' -> '.join(node.get_name()[7:] for node, edge in cycle)
        summary = 'module-level cyclic dependency: ' + modulelist
        reporter.cyclic_issue(summary)
+
def main():
"""Run the checking script."""
parser = OptionParser()
help='Write issues into a given log file in addition to stderr')
parser.add_option('--ignore',
help='Set file with patterns for messages to ignore')
+ parser.add_option('--ignore-cycles',
+ help='Set file with module dependencies to ignore in cycles')
parser.add_option('--check-ignored', action='store_true',
help='Issue notes for comments ignored by Doxygen')
parser.add_option('-q', '--quiet', action='store_true',
if not options.quiet:
sys.stderr.write('Reading source files...\n')
tree.scan_files()
+ if options.ignore_cycles:
+ tree.load_cycle_suppression_list(options.ignore_cycles)
if not options.quiet:
sys.stderr.write('Reading Doxygen XML files...\n')
tree.load_xml()
for memberobj in tree.get_members():
check_member(memberobj, reporter, options.check_ignored)
+ check_cycles(ModuleDependencyGraph(tree), reporter)
+
reporter.write_pending()
reporter.report_unused_filters()
reporter.close_log()
run automatically.
The checker currently checks for a few different types of issues:
-* For all Doxygen documentation (currently does not apply for members within
- anonymous namespaces or members that do not appear in the documentation):
+* For all Doxygen documentation (currently does not apply for members that do
+ not appear in the documentation):
* If a member has documentation, it should have a brief description.
* A note is issued for in-body documentation for functions, since this is
ignored by our current settings.
(\c \\defgroup module_foo exists for the subdirectory):
* Such files should not be included from outside their module if they are
undocumented or are not specified as part of library or public API.
+* For all modules:
+ * There should not be cyclic include dependencies between modules.
The checker is based on extracting the Doxygen documentation in XML format.
This information is then read using a Python script, and combined with
must be added. To cover many similar issues, parts of the line can then be
replaced with wildcards.
+A separate suppression mechanism is in place for cyclic dependencies: to
+suppress a cycle between moduleA and moduleB, add a line with format
+
+ moduleA -> moduleB
+
+into `doxygen/cycle-suppressions.txt`. This suppresses all cycles that contain
+the mentioned edge. Since a cycle contains multiple edges, the suppression
+should be made for the edge that is determined to be an incorrect dependency.
+
For some false positives from the script, the suppression mechanism is the
easiest way to silence the script, but otherwise the goal would be to minimize
the number of suppressions.
for fileobj in self._files:
yield fileobj
class ModuleDependency(object):

    """Dependency between two modules.

    Collects the individual #include dependencies from one module to
    another, and records whether cycles through this dependency have been
    suppressed.
    """

    def __init__(self, othermodule):
        """Initialize empty dependency object with given module as dependency."""
        self._othermodule = othermodule
        self._includedfiles = []
        self._cyclesuppression = None

    def get_other_module(self):
        """Get the module that this dependency points to."""
        return self._othermodule

    def add_included_file(self, includedfile):
        """Add an IncludedFile that is part of this dependency."""
        assert includedfile.get_file().get_module() == self._othermodule
        self._includedfiles.append(includedfile)

    def set_cycle_suppression(self):
        """Mark cycles that contain this dependency as suppressed."""
        self._cyclesuppression = True

    def is_cycle_suppressed(self):
        """Return whether cycles containing this dependency are suppressed."""
        return self._cyclesuppression is not None
+
class Module(object):
"""Code module in the GROMACS source tree.
self._rawdoc = None
self._rootdir = rootdir
self._group = None
+ self._dependencies = dict()
def set_doc_xml(self, rawdoc, sourcetree):
"""Assiociate Doxygen documentation entity with the module."""
if groupname.startswith('group_'):
self._group = groupname[6:]
+ def add_dependency(self, othermodule, includedfile):
+ """Add #include dependency from a file in this module."""
+ assert includedfile.get_file().get_module() == othermodule
+ if othermodule not in self._dependencies:
+ self._dependencies[othermodule] = ModuleDependency(othermodule)
+ self._dependencies[othermodule].add_included_file(includedfile)
+
    def is_documented(self):
        """Return whether Doxygen documentation is associated with this module."""
        return self._rawdoc is not None
    def get_group(self):
        """Return the documentation group for this module, or None if not set."""
        return self._group
+ def get_dependencies(self):
+ return self._dependencies.itervalues()
+
class Namespace(object):
"""Namespace in the GROMACS source code."""
for fileobj in self._files.itervalues():
if not fileobj.is_external():
fileobj.scan_contents(self)
+ module = fileobj.get_module()
+ if module:
+ for includedfile in fileobj.get_includes():
+ otherfile = includedfile.get_file()
+ if otherfile:
+ othermodule = otherfile.get_module()
+ if othermodule and othermodule != module:
+ module.add_dependency(othermodule, includedfile)
def load_xml(self, only_files=False):
"""Load Doxygen XML information.
continue
self._files[relpath].set_installed()
+ def load_cycle_suppression_list(self, filename):
+ """Load a list of edges to suppress in cycles.
+
+ These edges between modules, if present, will be marked in the
+ corresponding ModuleDependency objects.
+ """
+ with open(filename, 'r') as fp:
+ for line in fp:
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ modulenames = ['module_' + x.strip() for x in line.split('->')]
+ if len(modulenames) != 2:
+ self._reporter.input_error(
+ "invalid cycle suppression line: {0}".format(line))
+ continue
+ firstmodule = self._modules.get(modulenames[0])
+ secondmodule = self._modules.get(modulenames[1])
+ if not firstmodule or not secondmodule:
+ self._reporter.input_error(
+ "unknown modules mentioned on cycle suppression line: {0}".format(line))
+ continue
+ for dep in firstmodule.get_dependencies():
+ if dep.get_other_module() == secondmodule:
+ # TODO: Check that each suppression is actually part of
+ # a cycle.
+ dep.set_cycle_suppression()
+
def get_object(self, docobj):
"""Get tree object for a Doxygen XML object."""
if docobj is None: