author     Chris St. Pierre <chris.a.st.pierre@gmail.com>   2013-04-24 13:47:31 -0400
committer  Chris St. Pierre <chris.a.st.pierre@gmail.com>   2013-04-24 13:47:31 -0400
commit     0ff6b2788de683dd89203c7ae1393ea922a62c32 (patch)
tree       54ce843377ab26c6336de7f1abf3ec906d49aa69
parent     46a47b4120b3d892b8149a5e181e4d976ad87f99 (diff)
parent     29399cbc599919fd9c88448bde692132c803e69b (diff)
Merge branch 'maint'
Conflicts:
src/lib/Bcfg2/Client/Client.py
src/lib/Bcfg2/Client/Frame.py
src/lib/Bcfg2/Client/Tools/YUM.py
src/lib/Bcfg2/Options.py
src/lib/Bcfg2/Server/Admin/Perf.py
src/lib/Bcfg2/Server/Admin/Xcmd.py
src/lib/Bcfg2/Server/Admin/__init__.py
src/lib/Bcfg2/Server/Core.py
src/lib/Bcfg2/Server/FileMonitor/Fam.py
src/lib/Bcfg2/Server/Lint/RequiredAttrs.py
src/lib/Bcfg2/Server/Plugin/helpers.py
src/lib/Bcfg2/Server/Plugins/Base.py
src/lib/Bcfg2/Server/Plugins/Bundler.py
src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py
src/lib/Bcfg2/Server/Plugins/Cvs.py
src/lib/Bcfg2/Server/Plugins/Darcs.py
src/lib/Bcfg2/Server/Plugins/Decisions.py
src/lib/Bcfg2/Server/Plugins/FileProbes.py
src/lib/Bcfg2/Server/Plugins/Fossil.py
src/lib/Bcfg2/Server/Plugins/Git.py
src/lib/Bcfg2/Server/Plugins/Metadata.py
src/lib/Bcfg2/Server/Plugins/NagiosGen.py
src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py
src/lib/Bcfg2/Server/Plugins/Packages/Source.py
src/lib/Bcfg2/Server/Plugins/Packages/Yum.py
src/lib/Bcfg2/Server/Plugins/Properties.py
src/lib/Bcfg2/Server/Plugins/__init__.py
src/lib/Bcfg2/Server/__init__.py
src/sbin/bcfg2-build-reports
src/sbin/bcfg2-crypt
testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py
testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py
testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProperties.py
107 files changed, 1562 insertions, 840 deletions
diff --git a/.travis.yml b/.travis.yml index f5aade735..73b8a9594 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,16 +3,9 @@ python: - "2.5" - "2.6" - "2.7" - - "3.2" env: - WITH_OPTIONAL_DEPS=yes - WITH_OPTIONAL_DEPS=no -matrix: - exclude: - - python: "3.2" - env: WITH_OPTIONAL_DEPS=yes - - python: "3.3" - env: WITH_OPTIONAL_DEPS=yes before_install: - testsuite/before_install.sh install: diff --git a/doc/development/compat.txt b/doc/development/compat.txt index 90df45676..f90274ce5 100644 --- a/doc/development/compat.txt +++ b/doc/development/compat.txt @@ -100,6 +100,8 @@ behavior (e.g., :func:`input`) do not cause unexpected side-effects. +---------------------------------+--------------------------------------------------+---------------------------------------------------------+ | long | :func:`long` | :func:`int` | +---------------------------------+--------------------------------------------------+---------------------------------------------------------+ +| cmp | :func:`cmp` | Not implemented | ++---------------------------------+--------------------------------------------------+---------------------------------------------------------+ Python 2.4 compatibility ------------------------ diff --git a/doc/reports/dynamic.txt b/doc/reports/dynamic.txt index b3028e9e1..9de3f868f 100644 --- a/doc/reports/dynamic.txt +++ b/doc/reports/dynamic.txt @@ -53,40 +53,41 @@ Prerequisites Install ------- -Be sure to include the specified fields included in the example -``bcfg2.conf`` file. These can be specified in either ``/etc/bcfg2.conf``, -if it is readable by the webserver user, or ``/etc/bcfg2-web.conf``. Any -database supported by `Django <http://www.djangoproject.com>`_ can be used. -As of version 1.3, `South <http://south.aeracode.org>`_ is used to control -schema changes. If your database is not supported by South, any updates -will need to be applied manually. Sqlite is configured by default. -Please see the :ref:`reporting-databases` section to configure alternative -databases. -.. warning:: +1. Be sure to include the specified fields included in the example + ``bcfg2.conf`` file. These can be specified in either + ``/etc/bcfg2.conf``, if it is readable by the webserver user, + or ``/etc/bcfg2-web.conf``. Any database supported by `Django + <http://www.djangoproject.com>`_ can be used. As of version 1.3, + `South <http://south.aeracode.org>`_ is used to control schema changes. + If your database is not supported by South, any updates will need to + be applied manually. Sqlite is configured by default. Please see the + :ref:`reporting-databases` section to configure alternative databases. - If you are using an sqlite database, the directory containing the - database file will need to be writable by the web server. The reason - for this is that sqlite will create another file for its journal - when it tries to update the database file. + .. warning:: -.. note:: + If you are using an sqlite database, the directory containing the + database file will need to be writable by the web server. The reason + for this is that sqlite will create another file for its journal + when it tries to update the database file. + + .. note:: - Distributed environments can share a single remote database for - reporting. + Distributed environments can share a single remote database for + reporting. -After configuring your database be sure to run `bcfg2-admin reports init` -to create the schema. +2. After configuring your database be sure to run ``bcfg2-admin reports + init`` to create the schema. 
-To enable statistics collection in the bcfg2-server, add -:ref:`server-plugins-statistics-reporting` to the **plugins** -line in your ``bcfg2.conf`` and restart the bcfg2-server. A report collecting -daemon should be run to import the collected statistics into the backend. -Please see the section :ref:`Report Collector <report_collector>` for more -information. +3. To enable statistics collection in the bcfg2-server, add + :ref:`server-plugins-statistics-reporting` to the **plugins** + line in your ``bcfg2.conf`` and restart the bcfg2-server. A report + collecting daemon should be run to import the collected statistics + into the backend. Please see the section :ref:`Report Collector + <report_collector>` for more information. -Detailed installation instructions can be found :ref:`here -<appendix-guides-web-reports-install>`. + Detailed installation instructions can be found :ref:`here + <appendix-guides-web-reports-install>`. .. _dynamic-http-install: @@ -175,7 +176,7 @@ Upgrading .. note:: After the database is upgraded all of the old tables are left - intact. To remove them any table starting with reports_ can + intact. To remove them any table starting with **reports\_** can be dropped. 4. `(Optional)` Run the :ref:`Report Collector <report_collector>` @@ -199,11 +200,6 @@ An example using the defaults is listed below:: host = port = - [statistics] - config = /etc/bcfg2-web.conf - time_zone = - web_debug = False - [reporting] transport = DirectStore web_prefix = @@ -241,6 +237,8 @@ section: statistics ^^^^^^^^^^ +.. deprecated: 1.3.0 + * config: The config file to be read for additional reporting data. This is used to restrict what can be read by the web server. diff --git a/doc/server/plugins/connectors/grouplogic.txt b/doc/server/plugins/connectors/grouplogic.txt new file mode 100644 index 000000000..b9a5b00d6 --- /dev/null +++ b/doc/server/plugins/connectors/grouplogic.txt @@ -0,0 +1,122 @@ +.. -*- mode: rst -*- + +.. _server-plugins-connectors-grouplogic: + +========== +GroupLogic +========== + +.. versionadded:: 1.3.2 + +GroupLogic is a connector plugin that lets you use an XML Genshi +template to dynamically set additional groups for clients. + +Usage +===== + +To use the GroupLogic plugin, first do ``mkdir +/var/lib/bcfg2/GroupLogic``. Add ``GroupLogic`` to your ``plugins`` +line in ``/etc/bcfg2.conf``. Next, create +``/var/lib/bcfg2/GroupLogic/groups.xml``: + +.. code-block:: xml + + <GroupLogic xmlns:py="http://genshi.edgewall.org/"> + </GroupLogic> + +``groups.xml`` is structured very similarly to the +:ref:`server-plugins-grouping-metadata` ``groups.xml``. A Group tag +that contains no children is a declaration of membership; a Group or +Client tag that does contain children is a conditional. + +Unlike ``Metadata/groups.xml``, GroupLogic supports genshi templating, +so you can dynamically create groups. ``GroupLogic/groups.xml`` is +rendered for each client, and the groups set in it are added to the +client metadata. + +.. note:: + + Also unlike ``Metadata/groups.xml``, GroupLogic can not be used to + associate bundles with clients directly, or to negate groups. But + you can use GroupLogic to assign a group that is associated with a + bundle in Metadata. + +Consider the case where you have four environments -- dev, test, +staging, and production -- and four components to a web application -- +the frontend, the API, the database server, and the caching proxy. 
In +order to make files specific to the component *and* to the +environment, you need groups to describe each combination: +webapp-frontend-dev, webapp-frontend-test, and so on. You *could* do +this in ``Metadata/groups.xml``: + +.. code-block:: xml + + <Groups> + <Group name="webapp-frontend"> + <Group name="dev"> + <Group name="webapp-frontend-dev"/> + </Group> + <Group name="test"> + <Group name="webapp-frontend-test"/> + </Group> + ... + </Group> + <Group name="webapp-api"> + ... + </Group> + ... + </Groups> + +Creating the sixteen groups this way is incredibly tedious, and this +is a quite *small* site. GroupLogic can automate this process. + +Assume that we've declared the groups thusly in +``Metadata/groups.xml``: + +.. code-block:: xml + + <Groups> + <Group name="webapp-frontend" category="webapp-component"/> + <Group name="webapp-api" category="webapp-component"/> + <Group name="webapp-db" category="webapp-component"/> + <Group name="webapp-proxy" category="webapp-component"/> + <Group name="dev" category="environment"/> + <Group name="test" category="environment"/> + <Group name="staging" category="environment"/> + <Group name="prod" category="environment"/> + </Groups> + +One way to automate the creation of the groups would be to simply +generate the tedious config: + +.. code-block:: xml + + <GroupLogic xmlns:py="http://genshi.edgewall.org/"> + <py:for each="component in metadata.query.all_groups_in_category("webapp-component")> + <Group name="${component}"> + <py:for each="env in metadata.query.all_groups_in_category("environment")> + <Group name="${env}"> + <Group name="${component}-${env}"/> + </Group> + </py:for> + </Group> + </py:for> + </GroupLogic> + +But, since ``GroupLogic/groups.xml`` is rendered for each client +individually, there's a more elegant way to accomplish the same thing: + +.. code-block:: xml + + <GroupLogic xmlns:py="http://genshi.edgewall.org/"> + <?python +component = metadata.group_in_category("webapp-component") +env = metadata.group_in_category("environment") + ?> + <py:if test="component and env"> + <Group name="${component}-${env}"/> + </py:if> + </GroupLogic> + +This gets only the component and environment for the current client, +and, if both are set, sets the single appropriate group. diff --git a/doc/server/plugins/generators/packages.txt b/doc/server/plugins/generators/packages.txt index 95bf4238a..092cff1ae 100644 --- a/doc/server/plugins/generators/packages.txt +++ b/doc/server/plugins/generators/packages.txt @@ -372,7 +372,7 @@ configs. Simply add entries like these to the appropriate bundles: .. code-block:: xml <Path name="/etc/yum.repos.d/bcfg2.repo"/> - <Path name="/etc/apt/sources.d/bcfg2"/> + <Path name="/etc/apt/sources.list.d/bcfg2-packages-generated-sources.list"/> If you want to change the path to either of those files, you can set ``yum_config`` or ``apt_config`` in ``bcfg2.conf`` to the path to the @@ -640,25 +640,25 @@ It understands the following directives: [packages] section ------------------ -+-------------+------------------------------------------------------+----------+-----------------------------+ -| Name | Description | Values | Default | -+=============+======================================================+==========+=============================+ -| resolver | Enable dependency resolution | Boolean | True | -+-------------+------------------------------------------------------+----------+-----------------------------+ -| metadata | Enable metadata processing. 
Disabling ``metadata`` | Boolean | True | -| | implies disabling ``resolver`` as well. | | | -+-------------+------------------------------------------------------+----------+-----------------------------+ -| yum_config | The path at which to generate Yum configs. | String | /etc/yum.repos.d/bcfg2.repo | -+-------------+------------------------------------------------------+----------+-----------------------------+ -| apt_config | The path at which to generate APT configs. | String | /etc/apt/sources.d/bcfg2 | -+-------------+------------------------------------------------------+----------+-----------------------------+ -| gpg_keypath | The path on the client RPM GPG keys will be copied | String | /etc/pki/rpm-gpg | -| | to before they are imported on the client. | | | -+-------------+------------------------------------------------------+----------+-----------------------------+ -| version | Set the version attribute used when binding Packages | any|auto | auto | -+-------------+------------------------------------------------------+----------+-----------------------------+ -| cache | Path where Packages will store its cache | String | <repo>/Packages/cache | -+-------------+------------------------------------------------------+----------+-----------------------------+ ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| Name | Description | Values | Default | ++=============+======================================================+==========+===================================================================+ +| resolver | Enable dependency resolution | Boolean | True | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| metadata | Enable metadata processing. Disabling ``metadata`` | Boolean | True | +| | implies disabling ``resolver`` as well. | | | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| yum_config | The path at which to generate Yum configs. | String | /etc/yum.repos.d/bcfg2.repo | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| apt_config | The path at which to generate APT configs. | String | /etc/apt/sources.list.d/bcfg2-packages-generated-sources.list | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| gpg_keypath | The path on the client RPM GPG keys will be copied | String | /etc/pki/rpm-gpg | +| | to before they are imported on the client. 
| | | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| version | Set the version attribute used when binding Packages | any|auto | auto | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ +| cache | Path where Packages will store its cache | String | <repo>/Packages/cache | ++-------------+------------------------------------------------------+----------+-------------------------------------------------------------------+ [packages:yum] section diff --git a/doc/server/plugins/generators/rules.txt b/doc/server/plugins/generators/rules.txt index bcd7342c9..ccde65eb0 100644 --- a/doc/server/plugins/generators/rules.txt +++ b/doc/server/plugins/generators/rules.txt @@ -99,8 +99,13 @@ describe the attributes available for various Path types. Note that ``secontext`` below expects a full context, not just the type. For instance, "``system_u:object_r:etc_t:s0``", not just ``etc_t``. You can also specify "``__default__``", which will restore -the context of the file to the default set by policy. See -:ref:`server-selinux` for more information. +the context of the file to the default set by policy. If a file has +no default context rule, and you don't wish to set one, you can +specify ``secontext=''`` (i.e., an empty ``secontext``), in which case +the client will not try to manage the SELinux context of the file at +all. + +See :ref:`server-selinux` for more information. Attributes common to all Path tags: diff --git a/doc/server/plugins/generators/sslca.txt b/doc/server/plugins/generators/sslca.txt index 9c3a0806d..2a7e3ecad 100644 --- a/doc/server/plugins/generators/sslca.txt +++ b/doc/server/plugins/generators/sslca.txt @@ -156,7 +156,7 @@ Example .. code-block:: xml <CertInfo> - <SubjectAltName>test.example.com</SubjectAltName> + <subjectAltName>test.example.com</subjectAltName> <Group name="apache"> <Cert key="/etc/pki/tls/private/foo.key" days="730"/> </Group> diff --git a/doc/server/plugins/grouping/metadata.txt b/doc/server/plugins/grouping/metadata.txt index 9fe87c878..832b1a13f 100644 --- a/doc/server/plugins/grouping/metadata.txt +++ b/doc/server/plugins/grouping/metadata.txt @@ -121,20 +121,19 @@ a simple ``groups.xml`` file: <Group name='oracle-server'> <Group name='selinux-enabled' negate='true'/> </Group> - <Client name='foo.eample.com'> + <Client name='foo.example.com'> <Group name='oracle-server'/> <Group name='apache-server'/> </Client> </Groups> -A Group or Client tag that does not contain any child tags is a -declaration of membership; a Group or Client tag that does contain -children is a conditional. So the example above does not assign -either the ``rhel5`` or ``rhel6`` groups to machines in the -``mail-server`` group, but conditionally assigns the -``sendmail-server`` or ``postfix-server`` groups depending on the OS -of the client. (Presumably in this example the OS groups are set by a -probe.) +A Group tag that does not contain any child tags is a declaration of +membership; a Group or Client tag that does contain children is a +conditional. So the example above does not assign either the +``rhel5`` or ``rhel6`` groups to machines in the ``mail-server`` +group, but conditionally assigns the ``sendmail-server`` or +``postfix-server`` groups depending on the OS of the client. +(Presumably in this example the OS groups are set by a probe.) 
Consequently, a client that is RHEL 5 and a member of the ``mail-server`` profile group would also be a member of the diff --git a/doc/server/selinux.txt b/doc/server/selinux.txt index 9f54b0d68..79384970a 100644 --- a/doc/server/selinux.txt +++ b/doc/server/selinux.txt @@ -142,13 +142,13 @@ necessary. Duplicate Entries ----------------- -It may be necessary to use `BoundSELinux` tags if a single fcontext +It may be necessary to use `BoundSEFcontext` tags if a single fcontext needs two different SELinux types depending on whether it's a symlink or a plain file. For instance: .. code-block:: xml - <BoundSELinux type="fcontext" filetype="symlink" - name="/etc/localtime" selinuxtype="etc_t"/> - <BoundSELinux type="fcontext" filetype="regular" - name="/etc/localtime" selinuxtype="locale_t"/> + <BoundSEFcontext filetype="symlink" + name="/etc/localtime" selinuxtype="etc_t"/> + <BoundSEFcontext filetype="regular" + name="/etc/localtime" selinuxtype="locale_t"/> diff --git a/misc/bcfg2.spec b/misc/bcfg2.spec index 13bc6474e..0b975cb37 100644 --- a/misc/bcfg2.spec +++ b/misc/bcfg2.spec @@ -153,7 +153,7 @@ Requires: bcfg2-server = %{version} # cherrypy 3.2.3 actually doesn't exist yet, but 3.2.2 has bugs that # prevent it from working: # https://bitbucket.org/cherrypy/cherrypy/issue/1154/assertionerror-in-recv-when-ssl-is-enabled -Requires: python-cherrypy > 3.2.2 +Requires: python-cherrypy > 3.3 %description server-cherrypy Bcfg2 helps system administrators produce a consistent, reproducible, @@ -522,4 +522,3 @@ fi * Fri Sep 15 2006 Narayan Desai <desai@mcs.anl.gov> - 0.8.4-1 - Initial log - diff --git a/schemas/grouplogic.xsd b/schemas/grouplogic.xsd new file mode 100644 index 000000000..bf43bceb3 --- /dev/null +++ b/schemas/grouplogic.xsd @@ -0,0 +1,110 @@ +<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema" + xmlns:py="http://genshi.edgewall.org/" xml:lang="en"> + + <xsd:annotation> + <xsd:documentation> + GroupLogic schema for bcfg2 + </xsd:documentation> + </xsd:annotation> + + <xsd:import namespace="http://genshi.edgewall.org/" + schemaLocation="genshi.xsd"/> + + <xsd:complexType name="GroupLogicDeclarationType"> + <xsd:annotation> + <xsd:documentation> + A **GroupLogicDeclarationType** declares a Group to be added + to a client. + </xsd:documentation> + </xsd:annotation> + <xsd:attribute type='xsd:string' name='name' use='required'> + <xsd:annotation> + <xsd:documentation> + The group name + </xsd:documentation> + </xsd:annotation> + </xsd:attribute> + <xsd:attributeGroup ref="py:genshiAttrs"/> + </xsd:complexType> + + <xsd:complexType name="GroupLogicType"> + <xsd:annotation> + <xsd:documentation> + The top-level tag of a GroupLogic configuration file. 
+ </xsd:documentation> + </xsd:annotation> + <xsd:choice minOccurs="1" maxOccurs="unbounded"> + <xsd:group ref="py:genshiElements"/> + <xsd:element name='Group' type='GroupLogicDeclarationType'/> + <xsd:element name='Group' type='GroupLogicContainerType'> + <xsd:annotation> + <xsd:documentation> + Elements within Group tags only apply to clients that are + members of that group (or vice-versa; see #element_negate + below) + </xsd:documentation> + </xsd:annotation> + </xsd:element> + <xsd:element name='Client' type='GroupLogicContainerType'> + <xsd:annotation> + <xsd:documentation> + Elements within Client tags only apply to the named client + (or vice-versa; see #element_negate below) + </xsd:documentation> + </xsd:annotation> + </xsd:element> + <xsd:element name='GroupLogic' type='GroupLogicType'> + <xsd:annotation> + <xsd:documentation> + Nesting GroupLogic tags is allowed in order to support + XInclude. + </xsd:documentation> + </xsd:annotation> + </xsd:element> + </xsd:choice> + <xsd:attributeGroup ref="py:genshiAttrs"/> + </xsd:complexType> + + <xsd:complexType name="GroupLogicContainerType"> + <xsd:annotation> + <xsd:documentation> + A **GroupLogicContainerType** is a tag used to provide logic. + Child entries of a GroupLogicContainerType tag only apply to + machines that match the condition specified -- either + membership in a group, or a matching client name. + :xml:attribute:`GroupLogicContainerType:negate` can be set to + negate the sense of the match. + </xsd:documentation> + </xsd:annotation> + <xsd:complexContent> + <xsd:extension base="GroupLogicType"> + <xsd:attribute type='xsd:string' name='name' use='required'> + <xsd:annotation> + <xsd:documentation> + The group name + </xsd:documentation> + </xsd:annotation> + </xsd:attribute> + <xsd:attribute type='xsd:string' name='negate'> + <xsd:annotation> + <xsd:documentation> + Negate the sense of this group or client; i.e., entries + within this tag are only used on clients that are not + members of the group, or that have hostnames that do not + match. + </xsd:documentation> + </xsd:annotation> + </xsd:attribute> + </xsd:extension> + </xsd:complexContent> + </xsd:complexType> + + <xsd:element name='GroupLogic' type='GroupLogicType'> + <xsd:annotation> + <xsd:documentation> + A GroupLogic file is a genshi file that can be used to + dynamically add additional groups to a client. + </xsd:documentation> + </xsd:annotation> + </xsd:element> +</xsd:schema> diff --git a/schemas/types.xsd b/schemas/types.xsd index 144ef7337..9c4a0a48e 100644 --- a/schemas/types.xsd +++ b/schemas/types.xsd @@ -430,8 +430,10 @@ <xsd:attribute type="xsd:string" name="gecos"> <xsd:annotation> <xsd:documentation> - Human-readable user name or comment. If this is not set, - the GECOS will be the same as the username. + This field is typically used to record general information + about the account or its user(s) such as their real name + and phone number. If this is not set, the GECOS will be + the same as the username. 
</xsd:documentation> </xsd:annotation> </xsd:attribute> diff --git a/src/lib/Bcfg2/Client/Client.py b/src/lib/Bcfg2/Client/Client.py index 66c1ce430..10a15f160 100644 --- a/src/lib/Bcfg2/Client/Client.py +++ b/src/lib/Bcfg2/Client/Client.py @@ -91,7 +91,10 @@ class Client(object): try: script.write("#!%s\n" % (probe.attrib.get('interpreter', '/bin/sh'))) - script.write(probe.text) + if sys.hexversion >= 0x03000000: + script.write(probe.text) + else: + script.write(probe.text.encode('utf-8')) script.close() os.chmod(scriptname, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | @@ -105,7 +108,10 @@ class Client(object): self._probe_failure(name, "Return value %s" % rv) self.logger.info("Probe %s has result:" % name) self.logger.info(rv.stdout) - ret.text = rv.stdout + if sys.hexversion >= 0x03000000: + ret.text = rv.stdout + else: + ret.text = rv.stdout.decode('utf-8') finally: os.unlink(scriptname) except SystemExit: @@ -164,10 +170,11 @@ class Client(object): if len(probes.findall(".//probe")) > 0: try: # upload probe responses - self.proxy.RecvProbeData(Bcfg2.Client.XML.tostring( + self.proxy.RecvProbeData( + Bcfg2.Client.XML.tostring( probedata, - xml_declaration=False).decode('UTF-8')) - except Bcfg2.Client.Proxy.ProxyError: + xml_declaration=False).decode('utf-8')) + except Bcfg2.Proxy.ProxyError: err = sys.exc_info()[1] self.fatal_error("Failed to upload probe data: %s" % err) @@ -228,7 +235,7 @@ class Client(object): self.fatal_error("Failed to get decision list: %s" % err) try: - rawconfig = self.proxy.GetConfig().encode('UTF-8') + rawconfig = self.proxy.GetConfig().encode('utf-8') except Bcfg2.Client.Proxy.ProxyError: err = sys.exc_info()[1] self.fatal_error("Failed to download configuration from " @@ -246,7 +253,7 @@ class Client(object): self.logger.info("Starting Bcfg2 client run at %s" % times['start']) - rawconfig = self.get_config(times=times) + rawconfig = self.get_config(times=times).decode('utf-8') if self.setup['cache']: try: @@ -317,9 +324,10 @@ class Client(object): feedback = self.tools.GenerateStats() try: - self.proxy.RecvStats(Bcfg2.Client.XML.tostring( + self.proxy.RecvStats( + Bcfg2.Client.XML.tostring( feedback, - xml_declaration=False).decode('UTF-8')) + xml_declaration=False).decode('utf-8')) except Bcfg2.Client.Proxy.ProxyError: err = sys.exc_info()[1] self.logger.error("Failed to upload configuration statistics: " diff --git a/src/lib/Bcfg2/Client/Frame.py b/src/lib/Bcfg2/Client/Frame.py index 6ef686c10..11a82fcc0 100644 --- a/src/lib/Bcfg2/Client/Frame.py +++ b/src/lib/Bcfg2/Client/Frame.py @@ -6,8 +6,8 @@ import fnmatch import logging import Bcfg2.Client.Tools from Bcfg2.Client import prompt -from Bcfg2.Compat import any, all # pylint: disable=W0622 from Bcfg2.Options import get_option_parser +from Bcfg2.Compat import any, all, cmp # pylint: disable=W0622 def cmpent(ent1, ent2): @@ -67,8 +67,8 @@ class Frame(object): self.logger = logging.getLogger(__name__) drivers = self.setup['drivers'] for driver in drivers[:]: - if driver not in Bcfg2.Client.Tools.__all__ and \ - isinstance(driver, str): + if (driver not in Bcfg2.Client.Tools.__all__ and + isinstance(driver, str)): self.logger.error("Tool driver %s is not available" % driver) drivers.remove(driver) @@ -134,7 +134,7 @@ class Frame(object): if entry.tag == 'Package'] if pkgs: self.logger.debug("The following packages are specified in bcfg2:") - self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == None]) + self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] is None]) self.logger.debug("The following packages 
are prereqs added by " "Packages:") self.logger.debug([pkg[0] for pkg in pkgs if pkg[1] == 'Packages']) @@ -193,19 +193,19 @@ class Frame(object): """ # Need to process decision stuff early so that dryrun mode # works with it - self.whitelist = [entry for entry in self.states \ + self.whitelist = [entry for entry in self.states if not self.states[entry]] if not self.setup['file']: if self.setup['decision'] == 'whitelist': dwl = self.setup['decision_list'] - w_to_rem = [e for e in self.whitelist \ + w_to_rem = [e for e in self.whitelist if not matches_white_list(e, dwl)] if w_to_rem: self.logger.info("In whitelist mode: " "suppressing installation of:") self.logger.info(["%s:%s" % (e.tag, e.get('name')) for e in w_to_rem]) - self.whitelist = [x for x in self.whitelist \ + self.whitelist = [x for x in self.whitelist if x not in w_to_rem] elif self.setup['decision'] == 'blacklist': b_to_rem = \ @@ -236,7 +236,7 @@ class Frame(object): cfile not in self.whitelist): continue tools = [t for t in self.tools - if t.handlesEntry(cfile) and t.canVerify(cfile)] + if t.handlesEntry(cfile) and t.canVerify(cfile)] if not tools: continue if (self.setup['interactive'] and not @@ -395,8 +395,8 @@ class Frame(object): """Install all entries.""" self.DispatchInstallCalls(self.whitelist) mods = self.modified - mbundles = [struct for struct in self.config.findall('Bundle') if \ - [mod for mod in mods if mod in struct]] + mbundles = [struct for struct in self.config.findall('Bundle') + if any(True for mod in mods if mod in struct)] if self.modified: # Handle Bundle interdeps @@ -411,33 +411,34 @@ class Frame(object): self.logger.error("%s.Inventory() call failed:" % tool.name, exc_info=1) - clobbered = [entry for bundle in mbundles for entry in bundle \ + clobbered = [entry for bundle in mbundles for entry in bundle if (not self.states[entry] and entry not in self.blacklist)] if clobbered: self.logger.debug("Found clobbered entries:") - self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) \ + self.logger.debug(["%s:%s" % (entry.tag, entry.get('name')) for entry in clobbered]) if not self.setup['interactive']: self.DispatchInstallCalls(clobbered) for bundle in self.config.findall('.//Bundle'): - if self.setup['bundle'] and \ - bundle.get('name') not in self.setup['bundle']: + if (self.setup['bundle'] and + bundle.get('name') not in self.setup['bundle']): # prune out unspecified bundles when running with -b continue + if bundle in mbundles: + self.logger.debug("Bundle %s was modified" % bundle) + func = "BundleUpdated" + else: + self.logger.debug("Bundle %s was not modified" % bundle) + func = "BundleNotUpdated" for tool in self.tools: - if bundle in mbundles: - func = tool.BundleUpdated - else: - func = tool.BundleNotUpdated try: - self.states.update(func(bundle)) + self.states.update(getattr(tool, func)(bundle)) except: self.logger.error("%s.%s(%s:%s) call failed:" % - (tool.name, func.im_func.func_name, - bundle.tag, bundle.get("name")), - exc_info=1) + (tool.name, func, bundle.tag, + bundle.get("name")), exc_info=1) for indep in self.config.findall('.//Independent'): for tool in self.tools: @@ -468,7 +469,8 @@ class Frame(object): self.logger.info('Incorrect entries: %d' % list(self.states.values()).count(False)) if phase == 'final' and list(self.states.values()).count(False): - for entry in self.states.keys(): + for entry in sorted(self.states.keys(), key=lambda e: e.tag + ":" + + e.get('name')): if not self.states[entry]: etype = entry.get('type') if etype: diff --git a/src/lib/Bcfg2/Client/Proxy.py 
b/src/lib/Bcfg2/Client/Proxy.py index f8817bb27..57be34369 100644 --- a/src/lib/Bcfg2/Client/Proxy.py +++ b/src/lib/Bcfg2/Client/Proxy.py @@ -327,6 +327,7 @@ class XMLRPCTransport(xmlrpclib.Transport): return self.parse_response(response) if sys.hexversion < 0x03000000: + # pylint: disable=E1101 def send_request(self, host, handler, request_body, debug): """ send_request() changed significantly in py3k.""" conn = self.make_connection(host) @@ -335,6 +336,7 @@ class XMLRPCTransport(xmlrpclib.Transport): self.send_user_agent(conn) self.send_content(conn, request_body) return conn + # pylint: enable=E1101 def ComponentProxy(url, user=None, password=None, key=None, cert=None, ca=None, diff --git a/src/lib/Bcfg2/Client/Tools/APK.py b/src/lib/Bcfg2/Client/Tools/APK.py index 553339fc9..46f46bb1c 100644 --- a/src/lib/Bcfg2/Client/Tools/APK.py +++ b/src/lib/Bcfg2/Client/Tools/APK.py @@ -27,7 +27,7 @@ class APK(Bcfg2.Client.Tools.PkgTool): """Verify Package status for entry.""" if not 'version' in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % - (entry.attrib['name'])) + entry.attrib['name']) return False if entry.attrib['name'] in self.installed: diff --git a/src/lib/Bcfg2/Client/Tools/Action.py b/src/lib/Bcfg2/Client/Tools/Action.py index 7b62f61c7..fd2c467d7 100644 --- a/src/lib/Bcfg2/Client/Tools/Action.py +++ b/src/lib/Bcfg2/Client/Tools/Action.py @@ -19,12 +19,12 @@ class Action(Bcfg2.Client.Tools.Tool): the whitelist or blacklist """ if self.setup['decision'] == 'whitelist' and \ not matches_white_list(action, self.setup['decision_list']): - self.logger.info("In whitelist mode: suppressing Action:" + \ + self.logger.info("In whitelist mode: suppressing Action: %s" % action.get('name')) return False if self.setup['decision'] == 'blacklist' and \ not passes_black_list(action, self.setup['decision_list']): - self.logger.info("In blacklist mode: suppressing Action:" + \ + self.logger.info("In blacklist mode: suppressing Action: %s" % action.get('name')) return False return True diff --git a/src/lib/Bcfg2/Client/Tools/DebInit.py b/src/lib/Bcfg2/Client/Tools/DebInit.py index ca556e98b..d916b1662 100644 --- a/src/lib/Bcfg2/Client/Tools/DebInit.py +++ b/src/lib/Bcfg2/Client/Tools/DebInit.py @@ -16,7 +16,7 @@ class DebInit(Bcfg2.Client.Tools.SvcTool): __handles__ = [('Service', 'deb')] __req__ = {'Service': ['name', 'status']} svcre = \ - re.compile("/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)") + re.compile(r'/etc/.*/(?P<action>[SK])(?P<sequence>\d+)(?P<name>\S+)') # implement entry (Verify|Install) ops def VerifyService(self, entry, _): diff --git a/src/lib/Bcfg2/Client/Tools/Encap.py b/src/lib/Bcfg2/Client/Tools/Encap.py index 678e0f00c..270f0a5f2 100644 --- a/src/lib/Bcfg2/Client/Tools/Encap.py +++ b/src/lib/Bcfg2/Client/Tools/Encap.py @@ -13,7 +13,7 @@ class Encap(Bcfg2.Client.Tools.PkgTool): __req__ = {'Package': ['version', 'url']} pkgtype = 'encap' pkgtool = ("/usr/local/bin/epkg -l -f -q %s", ("%s", ["url"])) - splitter = re.compile('.*/(?P<name>[\w-]+)\-(?P<version>[\w\.+-]+)') + splitter = re.compile(r'.*/(?P<name>[\w-]+)\-(?P<version>[\w\.+-]+)') def RefreshPackages(self): """Try to find encap packages.""" diff --git a/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py b/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py index 635318805..31925fa3c 100644 --- a/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py +++ b/src/lib/Bcfg2/Client/Tools/FreeBSDPackage.py @@ -21,7 +21,7 @@ class FreeBSDPackage(Bcfg2.Client.Tools.PkgTool): def RefreshPackages(self): self.installed = 
{} packages = self.cmd.run("/usr/sbin/pkg_info -a -E").stdout.splitlines() - pattern = re.compile('(.*)-(\d.*)') + pattern = re.compile(r'(.*)-(\d.*)') for pkg in packages: if pattern.match(pkg): name = pattern.match(pkg).group(1) @@ -31,7 +31,7 @@ class FreeBSDPackage(Bcfg2.Client.Tools.PkgTool): def VerifyPackage(self, entry, _): if not 'version' in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % - (entry.attrib['name'])) + entry.attrib['name']) return False if entry.attrib['name'] in self.installed: if self.installed[entry.attrib['name']] == entry.attrib['version']: diff --git a/src/lib/Bcfg2/Client/Tools/IPS.py b/src/lib/Bcfg2/Client/Tools/IPS.py index e4c9d605d..c998ff083 100644 --- a/src/lib/Bcfg2/Client/Tools/IPS.py +++ b/src/lib/Bcfg2/Client/Tools/IPS.py @@ -50,9 +50,9 @@ class IPS(Bcfg2.Client.Tools.PkgTool): pass else: if entry.get('version') != self.installed[pname]: - self.logger.debug("IPS: Package %s: have %s want %s" \ - % (pname, self.installed[pname], - entry.get('version'))) + self.logger.debug("IPS: Package %s: have %s want %s" % + (pname, self.installed[pname], + entry.get('version'))) return False # need to implement pkg chksum validation diff --git a/src/lib/Bcfg2/Client/Tools/MacPorts.py b/src/lib/Bcfg2/Client/Tools/MacPorts.py index 5b395a170..dcf58cfec 100644 --- a/src/lib/Bcfg2/Client/Tools/MacPorts.py +++ b/src/lib/Bcfg2/Client/Tools/MacPorts.py @@ -33,7 +33,7 @@ class MacPorts(Bcfg2.Client.Tools.PkgTool): """Verify Package status for entry.""" if not 'version' in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % - (entry.attrib['name'])) + entry.attrib['name']) return False if entry.attrib['name'] in self.installed: diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py index d5aaf069d..9b84adad0 100644 --- a/src/lib/Bcfg2/Client/Tools/POSIX/Device.py +++ b/src/lib/Bcfg2/Client/Tools/POSIX/Device.py @@ -12,8 +12,8 @@ class POSIXDevice(POSIXTool): def fully_specified(self, entry): if entry.get('dev_type') in ['block', 'char']: # check if major/minor are properly specified - if (entry.get('major') == None or - entry.get('minor') == None): + if (entry.get('major') is None or + entry.get('minor') is None): return False return True diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/File.py b/src/lib/Bcfg2/Client/Tools/POSIX/File.py index 9b95d2234..168c35c98 100644 --- a/src/lib/Bcfg2/Client/Tools/POSIX/File.py +++ b/src/lib/Bcfg2/Client/Tools/POSIX/File.py @@ -34,13 +34,11 @@ class POSIXFile(POSIXTool): def _get_data(self, entry): """ Get a tuple of (<file data>, <is binary>) for the given entry """ - is_binary = False - if entry.get('encoding', 'ascii') == 'base64': - tempdata = b64decode(entry.text) - is_binary = True - - elif entry.get('empty', 'false') == 'true': + is_binary = entry.get('encoding', 'ascii') == 'base64' + if entry.get('empty', 'false') == 'true' or not entry.text: tempdata = '' + elif is_binary: + tempdata = b64decode(entry.text) else: tempdata = entry.text if isinstance(tempdata, unicode) and unicode != str: diff --git a/src/lib/Bcfg2/Client/Tools/POSIX/base.py b/src/lib/Bcfg2/Client/Tools/POSIX/base.py index f46875743..16fe0acb5 100644 --- a/src/lib/Bcfg2/Client/Tools/POSIX/base.py +++ b/src/lib/Bcfg2/Client/Tools/POSIX/base.py @@ -275,7 +275,7 @@ class POSIXTool(Bcfg2.Client.Tools.Tool): if path is None: path = entry.get("name") context = entry.get("secontext") - if context is None: + if not context: # no context listed return True @@ -520,13 +520,19 @@ 
class POSIXTool(Bcfg2.Client.Tools.Tool): "Current mtime is %s but should be %s" % (path, mtime, entry.get('mtime'))) - if HAS_SELINUX and entry.get("secontext"): + if HAS_SELINUX: + wanted_secontext = None if entry.get("secontext") == "__default__": - wanted_secontext = \ - selinux.matchpathcon(path, 0)[1].split(":")[2] + try: + wanted_secontext = \ + selinux.matchpathcon(path, 0)[1].split(":")[2] + except OSError: + errors.append("%s has no default SELinux context" % + entry.get("name")) else: wanted_secontext = entry.get("secontext") - if attrib['current_secontext'] != wanted_secontext: + if (wanted_secontext and + attrib['current_secontext'] != wanted_secontext): errors.append("SELinux context for path %s is incorrect. " "Current context is %s but should be %s" % (path, attrib['current_secontext'], @@ -712,8 +718,8 @@ class POSIXTool(Bcfg2.Client.Tools.Tool): tmpentry.set('mode', oct_mode(newmode)) for acl in tmpentry.findall('ACL'): acl.set('perms', - oct_mode(self._norm_acl_perms(acl.get('perms')) | \ - ACL_MAP['x'])) + oct_mode(self._norm_acl_perms(acl.get('perms')) | + ACL_MAP['x'])) for cpath in created: rv &= self._set_perms(tmpentry, path=cpath) return rv diff --git a/src/lib/Bcfg2/Client/Tools/Pacman.py b/src/lib/Bcfg2/Client/Tools/Pacman.py index 15fab53bd..d7d60a66d 100644 --- a/src/lib/Bcfg2/Client/Tools/Pacman.py +++ b/src/lib/Bcfg2/Client/Tools/Pacman.py @@ -25,12 +25,12 @@ class Pacman(Bcfg2.Client.Tools.PkgTool): def VerifyPackage(self, entry, _): '''Verify Package status for entry''' - self.logger.info("VerifyPackage : %s : %s" % entry.get('name'), - entry.get('version')) + self.logger.info("VerifyPackage: %s : %s" % (entry.get('name'), + entry.get('version'))) if not 'version' in entry.attrib: self.logger.info("Cannot verify unversioned package %s" % - (entry.attrib['name'])) + entry.attrib['name']) return False if entry.attrib['name'] in self.installed: diff --git a/src/lib/Bcfg2/Client/Tools/Portage.py b/src/lib/Bcfg2/Client/Tools/Portage.py index 32afa8cbf..5e319a414 100644 --- a/src/lib/Bcfg2/Client/Tools/Portage.py +++ b/src/lib/Bcfg2/Client/Tools/Portage.py @@ -13,8 +13,8 @@ class Portage(Bcfg2.Client.Tools.PkgTool): __req__ = {'Package': ['name', 'version']} pkgtype = 'ebuild' # requires a working PORTAGE_BINHOST in make.conf - _binpkgtool = ('emerge --getbinpkgonly %s', ('=%s-%s', \ - ['name', 'version'])) + _binpkgtool = ('emerge --getbinpkgonly %s', ('=%s-%s', ['name', + 'version'])) pkgtool = ('emerge %s', ('=%s-%s', ['name', 'version'])) def __init__(self, config): @@ -22,7 +22,7 @@ class Portage(Bcfg2.Client.Tools.PkgTool): Bcfg2.Client.Tools.PkgTool.__init__(self, config) self._initialised = True self.__important__ = self.__important__ + ['/etc/make.conf'] - self._pkg_pattern = re.compile('(.*)-(\d.*)') + self._pkg_pattern = re.compile(r'(.*)-(\d.*)') self._ebuild_pattern = re.compile('(ebuild|binary)') self.installed = {} self._binpkgonly = self.setup.get('portage_binpkgonly', False) diff --git a/src/lib/Bcfg2/Client/Tools/RcUpdate.py b/src/lib/Bcfg2/Client/Tools/RcUpdate.py index 552b27842..4b78581f7 100644 --- a/src/lib/Bcfg2/Client/Tools/RcUpdate.py +++ b/src/lib/Bcfg2/Client/Tools/RcUpdate.py @@ -12,6 +12,15 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): __handles__ = [('Service', 'rc-update')] __req__ = {'Service': ['name', 'status']} + def get_enabled_svcs(self): + """ + Return a list of all enabled services. 
+ """ + return [line.split()[0] + for line in self.cmd.run(['/bin/rc-status', + '-s']).stdout.splitlines() + if 'started' in line] + def VerifyService(self, entry, _): """ Verify Service status for entry. @@ -21,9 +30,12 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): if entry.get('status') == 'ignore': return True + # get a list of all started services + allsrv = self.get_enabled_svcs() + # check if service is enabled - result = self.cmd.run(["/sbin/rc-update", "show", "default"]) - is_enabled = entry.get("name") in result.stdout + result = self.cmd.run(["/sbin/rc-update", "show", "default"]).stdout + is_enabled = entry.get("name") in result # check if init script exists try: @@ -34,8 +46,7 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): return False # check if service is enabled - result = self.cmd.run(self.get_svc_command(entry, "status")) - is_running = "started" in result.stdout + is_running = entry.get('name') in allsrv if entry.get('status') == 'on' and not (is_enabled and is_running): entry.set('current_status', 'off') @@ -70,10 +81,7 @@ class RcUpdate(Bcfg2.Client.Tools.SvcTool): def FindExtra(self): """Locate extra rc-update services.""" - allsrv = [line.split()[0] - for line in self.cmd.run(['/bin/rc-status', - '-s']).stdout.splitlines() - if 'started' in line] + allsrv = self.get_enabled_svcs() self.logger.debug('Found active services:') self.logger.debug(allsrv) specified = [srv.get('name') for srv in self.getSupportedEntries()] diff --git a/src/lib/Bcfg2/Client/Tools/SELinux.py b/src/lib/Bcfg2/Client/Tools/SELinux.py index baf930610..f38615062 100644 --- a/src/lib/Bcfg2/Client/Tools/SELinux.py +++ b/src/lib/Bcfg2/Client/Tools/SELinux.py @@ -13,6 +13,7 @@ import seobject import Bcfg2.Client.XML import Bcfg2.Client.Tools from Bcfg2.Client.Tools.POSIX.File import POSIXFile +from Bcfg2.Compat import long # pylint: disable=W0622 def pack128(int_val): @@ -48,7 +49,7 @@ def netmask_itoa(netmask, proto="ipv4"): if netmask > size: raise ValueError("Netmask too large: %s" % netmask) - res = 0L + res = long(0) for i in range(netmask): res |= 1 << (size - i - 1) netmask = socket.inet_ntop(family, pack128(res)) @@ -167,7 +168,7 @@ class SELinuxEntryHandler(object): key_format = ("name",) value_format = () str_format = '%(name)s' - custom_re = re.compile(' (?P<name>\S+)$') + custom_re = re.compile(r' (?P<name>\S+)$') custom_format = None def __init__(self, tool, config): @@ -200,7 +201,16 @@ class SELinuxEntryHandler(object): type, if the records object supports the customized() method """ if hasattr(self.records, "customized") and self.custom_re: - return dict([(k, self.all_records[k]) for k in self.custom_keys]) + rv = dict() + for key in self.custom_keys: + if key in self.all_records: + rv[key] = self.all_records[key] + else: + self.logger.warning("SELinux %s %s customized, but no " + "record found. This may indicate an " + "error in your SELinux policy." 
% + (self.etype, key)) + return rv else: # ValueError is really a pretty dumb exception to raise, # but that's what the seobject customized() method raises @@ -504,14 +514,14 @@ class SELinuxSefcontextHandler(SELinuxEntryHandler): char="-c", door="-D") filetypenames = dict(all="all files", - regular="regular file", - directory="directory", - symlink="symbolic link", - pipe="named pipe", - socket="socket", - block="block device", - char="character device", - door="door") + regular="regular file", + directory="directory", + symlink="symbolic link", + pipe="named pipe", + socket="socket", + block="block device", + char="character device", + door="door") filetypeattrs = dict([v, k] for k, v in filetypenames.iteritems()) custom_re = re.compile(r'-f \'(?P<filetype>[a-z ]+)\'.*? \'(?P<name>.*)\'') diff --git a/src/lib/Bcfg2/Client/Tools/SMF.py b/src/lib/Bcfg2/Client/Tools/SMF.py index 68d8b2965..8b23a4a37 100644 --- a/src/lib/Bcfg2/Client/Tools/SMF.py +++ b/src/lib/Bcfg2/Client/Tools/SMF.py @@ -48,12 +48,12 @@ class SMF(Bcfg2.Client.Tools.SvcTool): gname = "/etc/rc*.d/%s" % filename files = glob.glob(gname.replace('_', '.')) if files: - self.logger.debug("Matched %s with %s" % \ + self.logger.debug("Matched %s with %s" % (entry.get("FMRI"), ":".join(files))) return entry.get('status') == 'on' else: - self.logger.debug("No service matching %s" % \ - (entry.get("FMRI"))) + self.logger.debug("No service matching %s" % + entry.get("FMRI")) return entry.get('status') == 'off' try: srvdata = \ @@ -76,13 +76,12 @@ class SMF(Bcfg2.Client.Tools.SvcTool): if entry.get("FMRI").startswith('lrc'): try: loc = entry.get("FMRI")[4:].replace('_', '.') - self.logger.debug("Renaming file %s to %s" % \ + self.logger.debug("Renaming file %s to %s" % (loc, loc.replace('/S', '/DISABLED.S'))) os.rename(loc, loc.replace('/S', '/DISABLED.S')) return True except OSError: - self.logger.error("Failed to rename init script %s" % \ - (loc)) + self.logger.error("Failed to rename init script %s" % loc) return False else: return self.cmd.run("/usr/sbin/svcadm disable %s" % @@ -118,12 +117,12 @@ class SMF(Bcfg2.Client.Tools.SvcTool): def FindExtra(self): """Find Extra SMF Services.""" - allsrv = [name for name, version in \ - [srvc.split() - for srvc in self.cmd.run([ - "/usr/bin/svcs", "-a", "-H", - "-o", "FMRI,STATE"]).stdout.splitlines()] - if version != 'disabled'] + allsrv = [] + for srvc in self.cmd.run(["/usr/bin/svcs", "-a", "-H", + "-o", "FMRI,STATE"]).stdout.splitlines(): + name, version = srvc.split() + if version != 'disabled': + allsrv.append(name) for svc in self.getSupportedEntries(): if svc.get("FMRI") in allsrv: diff --git a/src/lib/Bcfg2/Client/Tools/SYSV.py b/src/lib/Bcfg2/Client/Tools/SYSV.py index b5afa629d..7be7b6fa3 100644 --- a/src/lib/Bcfg2/Client/Tools/SYSV.py +++ b/src/lib/Bcfg2/Client/Tools/SYSV.py @@ -41,7 +41,7 @@ class SYSV(Bcfg2.Client.Tools.PkgTool): self.noaskfile.write(noask) # flush admin file contents to disk self.noaskfile.flush() - self.pkgtool = (self.pkgtool[0] % ("-a %s" % (self.noaskname)), \ + self.pkgtool = (self.pkgtool[0] % ("-a %s" % (self.noaskname)), self.pkgtool[1]) except: # pylint: disable=W0702 self.pkgtool = (self.pkgtool[0] % "", self.pkgtool[1]) @@ -66,7 +66,7 @@ class SYSV(Bcfg2.Client.Tools.PkgTool): desired_version = entry.get('version') if desired_version == 'any': desired_version = self.installed.get(entry.get('name'), - desired_version) + desired_version) if not self.cmd.run(["/usr/bin/pkginfo", "-q", "-v", desired_version, entry.get('name')]): diff --git 
a/src/lib/Bcfg2/Client/Tools/Upstart.py b/src/lib/Bcfg2/Client/Tools/Upstart.py index cd1c4a2bc..c96eab69d 100644 --- a/src/lib/Bcfg2/Client/Tools/Upstart.py +++ b/src/lib/Bcfg2/Client/Tools/Upstart.py @@ -46,9 +46,9 @@ class Upstart(Bcfg2.Client.Tools.SvcTool): entry.get('name')) return False - match = re.compile("%s( \(.*\))? (start|stop)/(running|waiting)" % + match = re.compile(r'%s( \(.*\))? (start|stop)/(running|waiting)' % entry.get('name')).match(output) - if match == None: + if match is None: # service does not exist entry.set('current_status', 'off') status = False diff --git a/src/lib/Bcfg2/Client/Tools/YUM.py b/src/lib/Bcfg2/Client/Tools/YUM.py index 57ca06e77..147615f47 100644 --- a/src/lib/Bcfg2/Client/Tools/YUM.py +++ b/src/lib/Bcfg2/Client/Tools/YUM.py @@ -133,10 +133,12 @@ class YUM(Bcfg2.Client.Tools.PkgTool): def __init__(self, config): self.yumbase = self._loadYumBase() Bcfg2.Client.Tools.PkgTool.__init__(self, config) - self.ignores = [entry.get('name') for struct in config \ - for entry in struct \ - if entry.tag == 'Path' and \ - entry.get('type') == 'ignore'] + self.ignores = [] + for struct in config: + self.ignores.extend([entry.get('name') + for entry in struct + if (entry.tag == 'Path' and + entry.get('type') == 'ignore')]) self.instance_status = {} self.extra_instances = [] self.modlists = {} @@ -296,8 +298,8 @@ class YUM(Bcfg2.Client.Tools.PkgTool): group. """ missing = Bcfg2.Client.Tools.PkgTool.missing_attrs(self, entry) - if entry.get('name', None) == None and \ - entry.get('group', None) == None: + if (entry.get('name', None) is None and + entry.get('group', None) is None): missing += ['name', 'group'] return missing @@ -425,10 +427,10 @@ class YUM(Bcfg2.Client.Tools.PkgTool): if entry.get('group'): self.logger.debug("Verifying packages for group %s" % - entry.get('group')) + entry.get('group')) else: self.logger.debug("Verifying package instances for %s" % - entry.get('name')) + entry.get('name')) self.verify_cache = dict() # Used for checking multilib packages self.modlists[entry] = modlist @@ -437,10 +439,10 @@ class YUM(Bcfg2.Client.Tools.PkgTool): package_fail = False qtext_versions = [] virt_pkg = False - pkg_checks = self.pkg_checks and \ - entry.get('pkg_checks', 'true').lower() == 'true' - pkg_verify = self.pkg_verify and \ - entry.get('pkg_verify', 'true').lower() == 'true' + pkg_checks = (self.pkg_checks and + entry.get('pkg_checks', 'true').lower() == 'true') + pkg_verify = (self.pkg_verify and + entry.get('pkg_verify', 'true').lower() == 'true') yum_group = False if entry.get('name') == 'gpg-pubkey': @@ -458,15 +460,13 @@ class YUM(Bcfg2.Client.Tools.PkgTool): if d] group_type = entry.get('choose', 'default') if group_type in ['default', 'optional', 'all']: - group_packages += [p - for p, d in - group.default_packages.items() - if d] + group_packages += [ + p for p, d in group.default_packages.items() + if d] if group_type in ['optional', 'all']: - group_packages += [p - for p, d in - group.optional_packages.items() - if d] + group_packages += [ + p for p, d in group.optional_packages.items() + if d] if len(group_packages) == 0: self.logger.error("No packages found for group %s" % entry.get("group")) @@ -492,7 +492,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool): else: all_pkg_objs = \ self.yumbase.rpmdb.searchNevra(name=entry.get('name')) - if len(all_pkg_objs) == 0 and yum_group != True: + if len(all_pkg_objs) == 0 and yum_group is not True: # Some sort of virtual capability? 
Try to resolve it all_pkg_objs = self.yumbase.rpmdb.searchProvides(entry.get('name')) if len(all_pkg_objs) > 0: @@ -570,9 +570,9 @@ class YUM(Bcfg2.Client.Tools.PkgTool): pkg_objs = [po for po in all_pkg_objs] else: pkg_objs = [po for po in all_pkg_objs - if po.checkPrco('provides', - (nevra["name"], 'EQ', - tuple(vlist)))] + if po.checkPrco('provides', + (nevra["name"], 'EQ', + tuple(vlist)))] elif entry.get('name') == 'gpg-pubkey': if 'version' not in nevra: self.logger.warning("Skipping verify: gpg-pubkey without " @@ -625,7 +625,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool): if self.setup.get('quick', False): # Passed -q on the command line continue - if not (pkg_verify and \ + if not (pkg_verify and inst.get('pkg_verify', 'true').lower() == 'true'): continue @@ -651,8 +651,8 @@ class YUM(Bcfg2.Client.Tools.PkgTool): # Now take out the Yum specific objects / modlists / unproblems ignores = [ig.get('name') for ig in entry.findall('Ignore')] + \ - [ig.get('name') for ig in inst.findall('Ignore')] + \ - self.ignores + [ig.get('name') for ig in inst.findall('Ignore')] + \ + self.ignores for fname, probs in list(vrfy_result.items()): if fname in modlist: self.logger.debug(" %s in modlist, skipping" % fname) @@ -740,8 +740,9 @@ class YUM(Bcfg2.Client.Tools.PkgTool): for pkg in pkg_objs: self.logger.debug(" Extra Instance Found: %s" % str(pkg)) Bcfg2.Client.XML.SubElement(extra_entry, 'Instance', - epoch=pkg.epoch, name=pkg.name, version=pkg.version, - release=pkg.release, arch=pkg.arch) + epoch=pkg.epoch, name=pkg.name, + version=pkg.version, + release=pkg.release, arch=pkg.arch) if pkg_objs == []: return None @@ -785,7 +786,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool): ver = yum.misc.keyIdToRPMVer(gpg['keyid']) rel = yum.misc.keyIdToRPMVer(gpg['timestamp']) if not (ver == inst.get('version') and rel == inst.get('release')): - self.logger.info("GPG key file %s does not match gpg-pubkey-%s-%s"\ + self.logger.info("GPG key file %s does not match gpg-pubkey-%s-%s" % (key_file, inst.get('version'), inst.get('release'))) return False @@ -794,20 +795,21 @@ class YUM(Bcfg2.Client.Tools.PkgTool): gpg['timestamp']) == 0: result = tset.pgpImportPubkey(yum.misc.procgpgkey(rawkey)) else: - self.logger.debug("gpg-pubkey-%s-%s already installed"\ - % (inst.get('version'), - inst.get('release'))) + self.logger.debug("gpg-pubkey-%s-%s already installed" % + (inst.get('version'), inst.get('release'))) return True if result != 0: - self.logger.debug("Unable to install %s-%s" % \ - (self.instance_status[inst].get('pkg').get('name'), - nevra2string(inst))) + self.logger.debug( + "Unable to install %s-%s" % + (self.instance_status[inst].get('pkg').get('name'), + nevra2string(inst))) return False else: - self.logger.debug("Installed %s-%s-%s" % \ - (self.instance_status[inst].get('pkg').get('name'), - inst.get('version'), inst.get('release'))) + self.logger.debug( + "Installed %s-%s-%s" % + (self.instance_status[inst].get('pkg').get('name'), + inst.get('version'), inst.get('release'))) return True def _runYumTransaction(self): @@ -902,7 +904,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool): # Remove extra instances. # Can not reverify because we don't have a package entry. 
if self.extra_instances is not None and len(self.extra_instances) > 0: - if (self.setup.get('remove') == 'all' or \ + if (self.setup.get('remove') == 'all' or self.setup.get('remove') == 'packages'): self.Remove(self.extra_instances) else: @@ -917,7 +919,7 @@ class YUM(Bcfg2.Client.Tools.PkgTool): # Figure out which instances of the packages actually need something # doing to them and place in the appropriate work 'queue'. for pkg in packages: - insts = [pinst for pinst in pkg \ + insts = [pinst for pinst in pkg if pinst.tag in ['Instance', 'Package']] if insts: for inst in insts: @@ -1010,10 +1012,11 @@ class YUM(Bcfg2.Client.Tools.PkgTool): if not self.setup['kevlar']: for pkg_entry in [p for p in packages if self.canVerify(p)]: - self.logger.debug("Reverifying Failed Package %s" \ - % (pkg_entry.get('name'))) - states[pkg_entry] = self.VerifyPackage(pkg_entry, - self.modlists.get(pkg_entry, [])) + self.logger.debug("Reverifying Failed Package %s" % + pkg_entry.get('name')) + states[pkg_entry] = \ + self.VerifyPackage(pkg_entry, + self.modlists.get(pkg_entry, [])) self.modified.extend(ent for ent in packages if states[ent]) return states diff --git a/src/lib/Bcfg2/Client/__init__.py b/src/lib/Bcfg2/Client/__init__.py index dd5ae1e83..e40ef750b 100644 --- a/src/lib/Bcfg2/Client/__init__.py +++ b/src/lib/Bcfg2/Client/__init__.py @@ -1,7 +1,5 @@ """This contains all Bcfg2 Client modules""" -__all__ = ["Frame", "Tools", "XML", "Client"] - import os import sys import select diff --git a/src/lib/Bcfg2/Compat.py b/src/lib/Bcfg2/Compat.py index 44c76303c..4e9239e26 100644 --- a/src/lib/Bcfg2/Compat.py +++ b/src/lib/Bcfg2/Compat.py @@ -10,7 +10,7 @@ Python 2.4 and such-like """ import sys -# pylint: disable=E0611,W0611,W0622,C0103 +# pylint: disable=E0601,E0602,E0611,W0611,W0622,C0103 try: from email.Utils import formatdate @@ -270,3 +270,11 @@ try: except NameError: # longs are just ints in py3k long = int + + +try: + cmp = cmp +except NameError: + def cmp(a, b): + """ Py3k implementation of cmp() """ + return (a > b) - (a < b) diff --git a/src/lib/Bcfg2/Logger.py b/src/lib/Bcfg2/Logger.py index 5bbc9ff96..e537b6148 100644 --- a/src/lib/Bcfg2/Logger.py +++ b/src/lib/Bcfg2/Logger.py @@ -144,9 +144,9 @@ def add_console_handler(level=logging.DEBUG): # tell the handler to use this format console.setFormatter(TermiosFormatter()) try: - console.set_name("console") + console.set_name("console") # pylint: disable=E1101 except AttributeError: - console.name = "console" + console.name = "console" # pylint: disable=W0201 logging.root.addHandler(console) @@ -162,9 +162,9 @@ def add_syslog_handler(procname, syslog_facility, level=logging.DEBUG): ('localhost', 514), syslog_facility) try: - syslog.set_name("syslog") + syslog.set_name("syslog") # pylint: disable=E1101 except AttributeError: - syslog.name = "syslog" + syslog.name = "syslog" # pylint: disable=W0201 syslog.setLevel(level) syslog.setFormatter( logging.Formatter('%(name)s[%(process)d]: %(message)s')) @@ -179,9 +179,9 @@ def add_file_handler(to_file, level=logging.DEBUG): """Add a logging handler that logs to to_file.""" filelog = logging.FileHandler(to_file) try: - filelog.set_name("file") + filelog.set_name("file") # pylint: disable=E1101 except AttributeError: - filelog.name = "file" + filelog.name = "file" # pylint: disable=W0201 filelog.setLevel(level) filelog.setFormatter( logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(message)s')) @@ -197,7 +197,7 @@ def setup_logging(procname, to_console=True, to_syslog=True, params = [] if 
to_console: - if to_console == True: + if to_console is True: to_console = logging.WARNING if level == 0: clvl = to_console diff --git a/src/lib/Bcfg2/Options.py b/src/lib/Bcfg2/Options.py index e5aeccf4d..67dcf901e 100644 --- a/src/lib/Bcfg2/Options.py +++ b/src/lib/Bcfg2/Options.py @@ -308,14 +308,14 @@ def list_split(c_string): """ split an option string on commas, optionally surrounded by whitespace, returning a list """ if c_string: - return re.split("\s*,\s*", c_string) + return re.split(r'\s*,\s*', c_string) return [] def colon_split(c_string): """ split an option string on colons, returning a list """ if c_string: - return c_string.split(':') + return c_string.split(r':') return [] @@ -349,7 +349,7 @@ def get_size(value): '512m', '2g'), get the absolute number of bytes as an integer """ if value == -1: return value - mat = re.match("(\d+)([KkMmGg])?", value) + mat = re.match(r'(\d+)([KkMmGg])?', value) if not mat: raise ValueError("Not a valid size", value) rvalue = int(mat.group(1)) @@ -395,7 +395,8 @@ CFILE = \ Option('Specify configuration file', default=DEFAULT_CONFIG_LOCATION, cmd='-C', - odesc='<conffile>') + odesc='<conffile>', + env="BCFG2_CONFIG") LOCKFILE = \ Option('Specify lockfile', default='/var/lock/bcfg2.run', @@ -1014,6 +1015,15 @@ VERBOSE = \ cmd='-v', cook=get_bool, cf=('logging', 'verbose')) +LOG_PERFORMANCE = \ + Option("Periodically log performance statistics", + default=False, + cf=('logging', 'performance')) +PERFLOG_INTERVAL = \ + Option("Performance statistics logging interval in seconds", + default=300.0, + cook=get_timeout, + cf=('logging', 'performance_interval')) # Plugin-specific options CFG_VALIDATION = \ @@ -1097,7 +1107,9 @@ SERVER_COMMON_OPTIONS = dict(repo=SERVER_REPOSITORY, web_configfile=WEB_CFILE, backend=SERVER_BACKEND, vcs_root=SERVER_VCS_ROOT, - authentication=SERVER_AUTHENTICATION) + authentication=SERVER_AUTHENTICATION, + perflog=LOG_PERFORMANCE, + perflog_interval=PERFLOG_INTERVAL) CRYPT_OPTIONS = dict(encrypt=ENCRYPT, decrypt=DECRYPT, @@ -1206,6 +1218,7 @@ INFO_COMMON_OPTIONS = dict(ppath=PARANOID_PATH, INFO_COMMON_OPTIONS.update(CLI_COMMON_OPTIONS) INFO_COMMON_OPTIONS.update(SERVER_COMMON_OPTIONS) + class OptionParser(OptionSet): """ OptionParser bootstraps option parsing, getting the value of the config file. 
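The ``Bcfg2/Compat.py`` hunk above adds a ``cmp()`` fallback because the builtin was removed in Python 3. A quick standalone check of the same shim; the final assert exists only to show the expected results::

    try:
        cmp = cmp
    except NameError:
        def cmp(a, b):
            """ Py3k implementation of cmp() """
            return (a > b) - (a < b)

    assert cmp(1, 2) == -1 and cmp(2, 2) == 0 and cmp(3, 2) == 1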
This should only be instantiated by diff --git a/src/lib/Bcfg2/Reporting/Transport/LocalFilesystem.py b/src/lib/Bcfg2/Reporting/Transport/LocalFilesystem.py index 0a0f032e5..c7d5c512a 100644 --- a/src/lib/Bcfg2/Reporting/Transport/LocalFilesystem.py +++ b/src/lib/Bcfg2/Reporting/Transport/LocalFilesystem.py @@ -87,7 +87,7 @@ class LocalFilesystem(TransportBase): # using a tmpfile to hopefully avoid the file monitor from grabbing too # soon - saved = open(tmp_file, 'w') + saved = open(tmp_file, 'wb') try: saved.write(payload) except IOError: @@ -123,7 +123,7 @@ class LocalFilesystem(TransportBase): self.debug_log("Handling event %s" % event.filename) payload = os.path.join(self.work_path, event.filename) try: - payloadfd = open(payload, "r") + payloadfd = open(payload, "rb") interaction = cPickle.load(payloadfd) payloadfd.close() os.unlink(payload) diff --git a/src/lib/Bcfg2/Reporting/models.py b/src/lib/Bcfg2/Reporting/models.py index 4be509f53..e63c180a8 100644 --- a/src/lib/Bcfg2/Reporting/models.py +++ b/src/lib/Bcfg2/Reporting/models.py @@ -139,9 +139,11 @@ class Interaction(models.Model): posixgroups = models.ManyToManyField("POSIXGroupEntry") failures = models.ManyToManyField("FailureEntry") - entry_types = ('actions', 'packages', 'paths', 'services', 'sebooleans', - 'seports', 'sefcontexts', 'senodes', 'selogins', 'seusers', - 'seinterfaces', 'sepermissives', 'semodules', 'posixusers', + entry_types = ('actions', 'failures', 'packages', + 'paths', 'services', 'sebooleans', + 'seports', 'sefcontexts', 'senodes', + 'selogins', 'seusers', 'seinterfaces', + 'sepermissives', 'semodules', 'posixusers', 'posixgroups') # Formerly InteractionMetadata diff --git a/src/lib/Bcfg2/Server/Admin/Init.py b/src/lib/Bcfg2/Server/Admin/Init.py index 884405786..870a31480 100644 --- a/src/lib/Bcfg2/Server/Admin/Init.py +++ b/src/lib/Bcfg2/Server/Admin/Init.py @@ -231,8 +231,8 @@ class Init(Bcfg2.Server.Admin.Mode): def _prompt_password(self): """Ask for a password or generate one if none is provided.""" newpassword = getpass.getpass( - "Input password used for communication verification " - "(without echoing; leave blank for a random): ").strip() + "Input password used for communication verification " + "(without echoing; leave blank for a random): ").strip() if len(newpassword) != 0: self.data['password'] = newpassword diff --git a/src/lib/Bcfg2/Server/Admin/Minestruct.py b/src/lib/Bcfg2/Server/Admin/Minestruct.py index 6d0dab106..93e42305c 100644 --- a/src/lib/Bcfg2/Server/Admin/Minestruct.py +++ b/src/lib/Bcfg2/Server/Admin/Minestruct.py @@ -3,6 +3,7 @@ import getopt import lxml.etree import sys import Bcfg2.Server.Admin +from Bcfg2.Server.Plugin import PullSource class Minestruct(Bcfg2.Server.Admin.StructureMode): @@ -39,12 +40,12 @@ class Minestruct(Bcfg2.Server.Admin.StructureMode): try: extra = set() - for source in self.bcore.pull_sources: + for source in self.bcore.plugins_by_type(PullSource): for item in source.GetExtra(client): extra.add(item) except: self.log.error("Failed to find extra entry info for client %s" % - client) + client) raise SystemExit(1) root = lxml.etree.Element("Base") self.log.info("Found %d extra entries" % (len(extra))) diff --git a/src/lib/Bcfg2/Server/Admin/Perf.py b/src/lib/Bcfg2/Server/Admin/Perf.py index a7e67c956..1a772e6fc 100644 --- a/src/lib/Bcfg2/Server/Admin/Perf.py +++ b/src/lib/Bcfg2/Server/Admin/Perf.py @@ -31,8 +31,8 @@ class Perf(Bcfg2.Server.Admin.Mode): timeout=setup['timeout']) data = proxy.get_statistics() for key in sorted(data.keys()): - 
output.append((key, ) + - tuple(["%.06f" % item - for item in data[key][:-1]] + \ - [data[key][-1]])) + output.append( + (key, ) + + tuple(["%.06f" % item + for item in data[key][:-1]] + [data[key][-1]])) self.print_table(output) diff --git a/src/lib/Bcfg2/Server/Admin/Pull.py b/src/lib/Bcfg2/Server/Admin/Pull.py index 1905fac3c..e883c432f 100644 --- a/src/lib/Bcfg2/Server/Admin/Pull.py +++ b/src/lib/Bcfg2/Server/Admin/Pull.py @@ -6,6 +6,7 @@ import sys import getopt import select import Bcfg2.Server.Admin +from Bcfg2.Server.Plugin import PullSource, Generator from Bcfg2.Compat import input # pylint: disable=W0622 @@ -62,13 +63,14 @@ class Pull(Bcfg2.Server.Admin.MetadataCore): given client/entry from statistics. """ new_entry = {'type': etype, 'name': ename} - for plugin in self.bcore.pull_sources: + pull_sources = self.bcore.plugins_by_type(PullSource) + for plugin in pull_sources: try: (owner, group, mode, contents) = \ - plugin.GetCurrentEntry(client, etype, ename) + plugin.GetCurrentEntry(client, etype, ename) break except Bcfg2.Server.Plugin.PluginExecutionError: - if plugin == self.bcore.pull_sources[-1]: + if plugin == pull_sources[-1]: print("Pull Source failure; could not fetch current state") raise SystemExit(1) @@ -121,8 +123,8 @@ class Pull(Bcfg2.Server.Admin.MetadataCore): meta = self.bcore.build_metadata(client) # Find appropriate plugin in bcore - glist = [gen for gen in self.bcore.generators if - ename in gen.Entries.get(etype, {})] + glist = [gen for gen in self.bcore.plugins_by_type(Generator) + if ename in gen.Entries.get(etype, {})] if len(glist) != 1: self.errExit("Got wrong numbers of matching generators for entry:" "%s" % ([g.name for g in glist])) diff --git a/src/lib/Bcfg2/Server/Admin/Xcmd.py b/src/lib/Bcfg2/Server/Admin/Xcmd.py index 6f411c2e4..ba4777c93 100644 --- a/src/lib/Bcfg2/Server/Admin/Xcmd.py +++ b/src/lib/Bcfg2/Server/Admin/Xcmd.py @@ -51,5 +51,5 @@ class Xcmd(Bcfg2.Server.Admin.Mode): print("Proxy Error: %s" % err) return - if data != None: + if data is not None: print(data) diff --git a/src/lib/Bcfg2/Server/Admin/__init__.py b/src/lib/Bcfg2/Server/Admin/__init__.py index 0c4764642..3fbdf8fa8 100644 --- a/src/lib/Bcfg2/Server/Admin/__init__.py +++ b/src/lib/Bcfg2/Server/Admin/__init__.py @@ -1,30 +1,14 @@ """ Base classes for admin modes """ -__all__ = [ - 'Backup', - 'Bundle', - 'Client', - 'Compare', - 'Group', - 'Init', - 'Minestruct', - 'Perf', - 'Pull', - 'Query', - 'Reports', - 'Syncdb', - 'Tidy', - 'Viz', - 'Xcmd' - ] - import re import sys import logging import lxml.etree import Bcfg2.Server.Core import Bcfg2.Options -from Bcfg2.Compat import ConfigParser +from Bcfg2.Compat import ConfigParser, walk_packages + +__all__ = [m[1] for m in walk_packages(path=__path__)] class Mode(object): @@ -104,15 +88,15 @@ class Mode(object): # Calculate column widths (longest item in each column # plus padding on both sides) cols = list(zip(*rows)) - col_widths = [max([len(str(item)) + 2 * padding for \ - item in col]) for col in cols] + col_widths = [max([len(str(item)) + 2 * padding + for item in col]) for col in cols] borderline = vdelim.join([w * hdelim for w in col_widths]) # Print out the table print(borderline) for row in rows: - print(vdelim.join([justify(str(item), width) for \ - (item, width) in zip(row, col_widths)])) + print(vdelim.join([justify(str(item), width) + for (item, width) in zip(row, col_widths)])) if hdr: print(borderline) hdr = False diff --git a/src/lib/Bcfg2/Server/BuiltinCore.py b/src/lib/Bcfg2/Server/BuiltinCore.py index 
663ee6f92..48455819d 100644 --- a/src/lib/Bcfg2/Server/BuiltinCore.py +++ b/src/lib/Bcfg2/Server/BuiltinCore.py @@ -9,12 +9,12 @@ from Bcfg2.Server.Core import BaseCore, NoExposedMethod from Bcfg2.Compat import xmlrpclib, urlparse from Bcfg2.Server.SSLServer import XMLRPCServer -from lockfile import LockFailed +from lockfile import LockFailed, LockTimeout # pylint: disable=E0611 try: - from daemon.pidfile import PIDLockFile + from daemon.pidfile import TimeoutPIDLockFile except ImportError: - from daemon.pidlockfile import PIDLockFile + from daemon.pidlockfile import TimeoutPIDLockFile # pylint: enable=E0611 @@ -33,7 +33,8 @@ class Core(BaseCore): gid=self.setup['daemon_gid'], umask=int(self.setup['umask'], 8)) if self.setup['daemon']: - daemon_args['pidfile'] = PIDLockFile(self.setup['daemon']) + daemon_args['pidfile'] = TimeoutPIDLockFile(self.setup['daemon'], + acquire_timeout=5) #: The :class:`daemon.DaemonContext` used to drop #: privileges, write the PID file (with :class:`PidFile`), #: and daemonize this core. @@ -90,6 +91,11 @@ class Core(BaseCore): err = sys.exc_info()[1] self.logger.error("Failed to daemonize %s: %s" % (self.name, err)) return False + except LockTimeout: + err = sys.exc_info()[1] + self.logger.error("Failed to daemonize %s: Failed to acquire lock " + "on %s" % (self.name, self.setup['daemon'])) + return False def _run(self): """ Create :attr:`server` to start the server listening. """ diff --git a/src/lib/Bcfg2/Server/Core.py b/src/lib/Bcfg2/Server/Core.py index c69e8b055..b0cb4cc87 100644 --- a/src/lib/Bcfg2/Server/Core.py +++ b/src/lib/Bcfg2/Server/Core.py @@ -20,7 +20,9 @@ from itertools import chain from Bcfg2.Server.Cache import Cache from Bcfg2.Options import get_option_parser, SERVER_FAM_IGNORE from Bcfg2.Compat import xmlrpclib # pylint: disable=W0622 -from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError +from Bcfg2.Server.Plugin.exceptions import * # pylint: disable=W0401,W0614 +from Bcfg2.Server.Plugin.interfaces import * # pylint: disable=W0401,W0614 +from Bcfg2.Server.Plugin import track_statistics try: import psyco @@ -93,6 +95,7 @@ class BaseCore(object): .. automethod:: _block .. ----- .. automethod:: _file_monitor_thread + .. automethod:: _perflog_thread """ #: The Bcfg2 options dict self.setup = get_option_parser() @@ -179,6 +182,9 @@ class BaseCore(object): #: the first one loaded wins. self.plugin_blacklist = {} + #: The Metadata plugin + self.metadata = None + #: Revision of the Bcfg2 specification. This will be sent to #: the client in the configuration, and can be set by a #: :class:`Bcfg2.Server.Plugin.interfaces.Version` plugin. 
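The BuiltinCore change above replaces ``PIDLockFile`` with ``TimeoutPIDLockFile`` and catches ``lockfile.LockTimeout``, so a stale PID lock makes startup fail quickly with a clear error instead of hanging. A minimal sketch of the same pattern outside of Bcfg2; the ``daemonize()`` wrapper is illustrative, while the five-second timeout and the exception handling mirror the diff::

    import sys
    import daemon
    from lockfile import LockFailed, LockTimeout
    try:
        from daemon.pidfile import TimeoutPIDLockFile
    except ImportError:
        from daemon.pidlockfile import TimeoutPIDLockFile

    def daemonize(pidfile):
        """ Daemonize, giving up after five seconds if the PID file
        is already locked by another process. """
        context = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile, acquire_timeout=5))
        try:
            context.open()
            return True
        except LockFailed:
            sys.stderr.write("Failed to daemonize: %s\n" % sys.exc_info()[1])
            return False
        except LockTimeout:
            sys.stderr.write("Failed to acquire lock on %s\n" % pidfile)
            return False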
@@ -237,71 +243,6 @@ class BaseCore(object): self.logger.error("Failed to set ownership of database " "at %s: %s" % (db_settings['NAME'], err)) - if '' in self.setup['plugins']: - self.setup['plugins'].remove('') - - for plugin in self.setup['plugins']: - if not plugin in self.plugins: - self.init_plugin(plugin) - # Remove blacklisted plugins - for plugin, blacklist in list(self.plugin_blacklist.items()): - if len(blacklist) > 0: - self.logger.error("The following plugins conflict with %s;" - "Unloading %s" % (plugin, blacklist)) - for plug in blacklist: - del self.plugins[plug] - - # Log experimental plugins - expl = [plug for plug in list(self.plugins.values()) - if plug.experimental] - if expl: - self.logger.info("Loading experimental plugin(s): %s" % - (" ".join([x.name for x in expl]))) - self.logger.info("NOTE: Interfaces subject to change") - - # Log deprecated plugins - depr = [plug for plug in list(self.plugins.values()) - if plug.deprecated] - if depr: - self.logger.info("Loading deprecated plugin(s): %s" % - (" ".join([x.name for x in depr]))) - - # Find the metadata plugin and set self.metadata - mlist = self.plugins_by_type(Bcfg2.Server.Plugin.Metadata) - if len(mlist) >= 1: - #: The Metadata plugin - self.metadata = mlist[0] - if len(mlist) > 1: - self.logger.error("Multiple Metadata plugins loaded; " - "using %s" % self.metadata) - else: - self.logger.error("No Metadata plugin loaded; " - "failed to instantiate Core") - raise CoreInitError("No Metadata Plugin") - - #: The list of plugins that handle - #: :class:`Bcfg2.Server.Plugin.interfaces.Statistics` - self.statistics = self.plugins_by_type(Bcfg2.Server.Plugin.Statistics) - - #: The list of plugins that implement the - #: :class:`Bcfg2.Server.Plugin.interfaces.PullSource` - #: interface - self.pull_sources = \ - self.plugins_by_type(Bcfg2.Server.Plugin.PullSource) - - #: The list of - #: :class:`Bcfg2.Server.Plugin.interfaces.Generator` plugins - self.generators = self.plugins_by_type(Bcfg2.Server.Plugin.Generator) - - #: The list of plugins that handle - #: :class:`Bcfg2.Server.Plugin.interfaces.Structure` - #: generation - self.structures = self.plugins_by_type(Bcfg2.Server.Plugin.Structure) - - #: The list of plugins that implement the - #: :class:`Bcfg2.Server.Plugin.interfaces.Connector` interface - self.connectors = self.plugins_by_type(Bcfg2.Server.Plugin.Connector) - #: The CA that signed the server cert self.ca = self.setup['ca'] @@ -319,6 +260,12 @@ class BaseCore(object): threading.Thread(name="%sFAMThread" % self.setup['filemonitor'], target=self._file_monitor_thread) + self.perflog_thread = None + if self.setup['perflog']: + self.perflog_thread = \ + threading.Thread(name="PerformanceLoggingThread", + target=self._perflog_thread) + #: A :func:`threading.Lock` for use by #: :func:`Bcfg2.Server.FileMonitor.FileMonitor.handle_event_set` self.lock = threading.Lock() @@ -327,10 +274,6 @@ class BaseCore(object): #: metadata self.metadata_cache = Cache() - if self.debug_flag: - # enable debugging on everything else. - self.plugins[plugin].set_debug(self.debug_flag) - def plugins_by_type(self, base_cls): """ Return a list of loaded plugins that match the passed type. @@ -351,11 +294,23 @@ class BaseCore(object): if isinstance(plugin, base_cls)], key=lambda p: (p.sort_order, p.name)) + def _perflog_thread(self): + """ The thread that periodically logs performance statistics + to syslog. 
""" + self.logger.debug("Performance logging thread starting") + while not self.terminate.isSet(): + self.terminate.wait(self.setup['perflog_interval']) + for name, stats in self.get_statistics(None).items(): + self.logger.info("Performance statistics: " + "%s min=%.06f, max=%.06f, average=%.06f, " + "count=%d" % ((name, ) + stats)) + def _file_monitor_thread(self): """ The thread that runs the :class:`Bcfg2.Server.FileMonitor.FileMonitor`. This also queries :class:`Bcfg2.Server.Plugin.interfaces.Version` plugins for the current revision of the Bcfg2 repo. """ + self.logger.debug("File monitor thread starting") famfd = self.fam.fileno() terminate = self.terminate while not terminate.isSet(): @@ -374,7 +329,7 @@ class BaseCore(object): def _update_vcs_revision(self): """ Update the revision of the current configuration on-disk from the VCS plugin """ - for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Version): + for plugin in self.plugins_by_type(Version): try: newrev = plugin.get_revision() if newrev != self.revision: @@ -386,6 +341,59 @@ class BaseCore(object): (plugin.name, sys.exc_info()[1])) self.revision = '-1' + def load_plugins(self): + """ Load all plugins, setting + :attr:`Bcfg2.Server.Core.BaseCore.plugins` and + :attr:`Bcfg2.Server.Core.BaseCore.metadata` as side effects. + This does not start plugin threads; that is done later, in + :func:`Bcfg2.Server.Core.BaseCore.run` """ + while '' in self.setup['plugins']: + self.setup['plugins'].remove('') + + for plugin in self.setup['plugins']: + if not plugin in self.plugins: + self.init_plugin(plugin) + + # Remove blacklisted plugins + for plugin, blacklist in list(self.plugin_blacklist.items()): + if len(blacklist) > 0: + self.logger.error("The following plugins conflict with %s;" + "Unloading %s" % (plugin, blacklist)) + for plug in blacklist: + del self.plugins[plug] + + # Log deprecated and experimental plugins + expl = [] + depr = [] + for plug in list(self.plugins.values()): + if plug.experimental: + expl.append(plug) + if plug.deprecated: + depr.append(plug) + if expl: + self.logger.info("Loading experimental plugin(s): %s" % + (" ".join([x.name for x in expl]))) + self.logger.info("NOTE: Interfaces subject to change") + if depr: + self.logger.info("Loading deprecated plugin(s): %s" % + (" ".join([x.name for x in depr]))) + + # Find the metadata plugin and set self.metadata + mlist = self.plugins_by_type(Metadata) + if len(mlist) >= 1: + self.metadata = mlist[0] + if len(mlist) > 1: + self.logger.error("Multiple Metadata plugins loaded; using %s" + % self.metadata) + else: + self.logger.error("No Metadata plugin loaded; " + "failed to instantiate Core") + raise CoreInitError("No Metadata Plugin") + + if self.debug_flag: + # enable debugging on plugins + self.plugins[plugin].set_debug(self.debug_flag) + def init_plugin(self, plugin): """ Import and instantiate a single plugin. The plugin is stored to :attr:`plugins`. 
@@ -399,7 +407,7 @@ class BaseCore(object): self.logger.debug("Loading plugin %s" % plugin) try: mod = getattr(__import__("Bcfg2.Server.Plugins.%s" % - (plugin)).Server.Plugins, plugin) + (plugin)).Server.Plugins, plugin) except ImportError: try: mod = __import__(plugin, globals(), locals(), @@ -422,6 +430,10 @@ class BaseCore(object): except PluginInitError: self.logger.error("Failed to instantiate plugin %s" % plugin, exc_info=1) + except OSError: + err = sys.exc_info()[1] + self.logger.error("Failed to add a file monitor while " + "instantiating plugin %s: %s" % (plugin, err)) except: self.logger.error("Unexpected instantiation failure for plugin %s" % plugin, exc_info=1) @@ -468,8 +480,7 @@ class BaseCore(object): metadata.hostname)) start = time.time() try: - for plugin in \ - self.plugins_by_type(Bcfg2.Server.Plugin.ClientRunHooks): + for plugin in self.plugins_by_type(ClientRunHooks): try: getattr(plugin, hook)(metadata) except AttributeError: @@ -500,11 +511,10 @@ class BaseCore(object): :type data: list of lxml.etree._Element objects """ self.logger.debug("Validating structures for %s" % metadata.hostname) - for plugin in \ - self.plugins_by_type(Bcfg2.Server.Plugin.StructureValidator): + for plugin in self.plugins_by_type(StructureValidator): try: plugin.validate_structures(metadata, data) - except Bcfg2.Server.Plugin.ValidationError: + except ValidationError: err = sys.exc_info()[1] self.logger.error("Plugin %s structure validation failed: %s" % (plugin.name, err)) @@ -527,10 +537,10 @@ class BaseCore(object): :type data: list of lxml.etree._Element objects """ self.logger.debug("Validating goals for %s" % metadata.hostname) - for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.GoalValidator): + for plugin in self.plugins_by_type(GoalValidator): try: plugin.validate_goals(metadata, data) - except Bcfg2.Server.Plugin.ValidationError: + except ValidationError: err = sys.exc_info()[1] self.logger.error("Plugin %s goal validation failed: %s" % (plugin.name, err.message)) @@ -548,8 +558,9 @@ class BaseCore(object): :returns: list of :class:`lxml.etree._Element` objects """ self.logger.debug("Getting structures for %s" % metadata.hostname) - structures = list(chain(*[struct.BuildStructures(metadata) - for struct in self.structures])) + structures = list( + chain(*[struct.BuildStructures(metadata) + for struct in self.plugins_by_type(Structure)])) sbundles = [b.get('name') for b in structures if b.tag == 'Bundle'] missing = [b for b in metadata.bundles if b not in sbundles] if missing: @@ -634,8 +645,9 @@ class BaseCore(object): self.logger.error("Falling back to %s:%s" % (entry.tag, entry.get('name'))) - glist = [gen for gen in self.generators if - entry.get('name') in gen.Entries.get(entry.tag, {})] + generators = self.plugins_by_type(Generator) + glist = [gen for gen in generators + if entry.get('name') in gen.Entries.get(entry.tag, {})] if len(glist) == 1: return glist[0].Entries[entry.tag][entry.get('name')](entry, metadata) @@ -643,8 +655,8 @@ class BaseCore(object): generators = ", ".join([gen.name for gen in glist]) self.logger.error("%s %s served by multiple generators: %s" % (entry.tag, entry.get('name'), generators)) - g2list = [gen for gen in self.generators if - gen.HandlesEntry(entry, metadata)] + g2list = [gen for gen in generators + if gen.HandlesEntry(entry, metadata)] try: if len(g2list) == 1: return g2list[0].HandleEntry(entry, metadata) @@ -671,7 +683,7 @@ class BaseCore(object): revision=self.revision) try: meta = self.build_metadata(client) - except 
Bcfg2.Server.Plugin.MetadataConsistencyError: + except MetadataConsistencyError: self.logger.error("Metadata consistency error for client %s" % client) return lxml.etree.Element("error", type='metadata error') @@ -718,7 +730,8 @@ class BaseCore(object): :type event: Bcfg2.Server.FileMonitor.Event """ if event.filename != self.cfile: - print("Got event for unknown file: %s" % event.filename) + self.logger.error("Got event for unknown file: %s" % + event.filename) return if event.code2str() == 'deleted': return @@ -755,11 +768,15 @@ class BaseCore(object): return False try: + self.load_plugins() + self.fam.start() self.fam_thread.start() self.fam.AddMonitor(self.cfile, self) + if self.perflog_thread is not None: + self.perflog_thread.start() - for plug in self.plugins_by_type(Bcfg2.Server.Plugin.Threaded): + for plug in self.plugins_by_type(Threaded): plug.start_threads() except: self.shutdown() @@ -801,7 +818,7 @@ class BaseCore(object): """ self.logger.debug("Getting decision list for %s" % metadata.hostname) result = [] - for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Decision): + for plugin in self.plugins_by_type(Decision): try: result.extend(plugin.GetDecisions(metadata, mode)) except: @@ -862,7 +879,7 @@ class BaseCore(object): """ if not hasattr(self, 'metadata'): # some threads start before metadata is even loaded - raise Bcfg2.Server.Plugin.MetadataRuntimeError + raise MetadataRuntimeError("Metadata not loaded yet") if self.metadata_cache_mode == 'initial': # the Metadata plugin handles loading the cached data if # we're only caching the initial metadata object @@ -872,10 +889,11 @@ class BaseCore(object): if not imd: self.logger.debug("Building metadata for %s" % client_name) imd = self.metadata.get_initial_metadata(client_name) - for conn in self.connectors: + connectors = self.plugins_by_type(Connector) + for conn in connectors: grps = conn.get_additional_groups(imd) self.metadata.merge_additional_groups(imd, grps) - for conn in self.connectors: + for conn in connectors: data = conn.get_additional_data(imd) self.metadata.merge_additional_data(imd, conn.name, data) imd.query.by_name = self.build_metadata @@ -896,7 +914,7 @@ class BaseCore(object): meta = self.build_metadata(client_name) state = statistics.find(".//Statistics") if state.get('version') >= '2.0': - for plugin in self.statistics: + for plugin in self.plugins_by_type(Statistics): try: plugin.process_statistics(meta, statistics) except: @@ -938,11 +956,11 @@ class BaseCore(object): meta = self.build_metadata(client) else: meta = None - except Bcfg2.Server.Plugin.MetadataConsistencyError: + except MetadataConsistencyError: err = sys.exc_info()[1] self.critical_error("Client metadata resolution error for %s: %s" % (address[0], err)) - except Bcfg2.Server.Plugin.MetadataRuntimeError: + except MetadataRuntimeError: err = sys.exc_info()[1] self.critical_error('Metadata system runtime failure for %s: %s' % (address[0], err)) @@ -1038,8 +1056,7 @@ class BaseCore(object): version)) try: self.metadata.set_version(client, version) - except (Bcfg2.Server.Plugin.MetadataConsistencyError, - Bcfg2.Server.Plugin.MetadataRuntimeError): + except (MetadataConsistencyError, MetadataRuntimeError): err = sys.exc_info()[1] self.critical_error("Unable to set version for %s: %s" % (client, err)) @@ -1059,7 +1076,7 @@ class BaseCore(object): client, metadata = self.resolve_client(address, cleanup_cache=True) self.logger.debug("Getting probes for %s" % client) try: - for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Probing): + for 
plugin in self.plugins_by_type(Probing): for probe in plugin.GetProbes(metadata): resp.append(probe) return lxml.etree.tostring(resp, @@ -1129,11 +1146,10 @@ class BaseCore(object): self.logger.debug("%s sets its profile to %s" % (client, profile)) try: self.metadata.set_profile(client, profile, address) - except (Bcfg2.Server.Plugin.MetadataConsistencyError, - Bcfg2.Server.Plugin.MetadataRuntimeError): + except (MetadataConsistencyError, MetadataRuntimeError): err = sys.exc_info()[1] self.critical_error("Unable to assert profile for %s: %s" % - (client, err)) + (client, err)) return True @exposed @@ -1152,7 +1168,7 @@ class BaseCore(object): config = self.BuildConfiguration(client) return lxml.etree.tostring(config, xml_declaration=False).decode('UTF-8') - except Bcfg2.Server.Plugin.MetadataConsistencyError: + except MetadataConsistencyError: self.critical_error("Metadata consistency failure for %s" % client) @exposed diff --git a/src/lib/Bcfg2/Server/Encryption.py b/src/lib/Bcfg2/Server/Encryption.py index b46337eb0..ee1c1cab9 100755 --- a/src/lib/Bcfg2/Server/Encryption.py +++ b/src/lib/Bcfg2/Server/Encryption.py @@ -21,7 +21,7 @@ DECRYPT = 0 #: Default initialization vector. For best security, you should use a #: unique IV for each message. :func:`ssl_encrypt` does this in an #: automated fashion. -IV = '\0' * 16 +IV = r'\0' * 16 #: The config file section encryption options and passphrases are #: stored in @@ -121,9 +121,11 @@ def ssl_decrypt(data, passwd, algorithm=ALGORITHM): # base64-decode the data data = b64decode(data) salt = data[8:16] + # pylint: disable=E1101 hashes = [md5(passwd + salt).digest()] for i in range(1, 3): hashes.append(md5(hashes[i - 1] + passwd + salt).digest()) + # pylint: enable=E1101 key = hashes[0] + hashes[1] iv = hashes[2] @@ -149,9 +151,11 @@ def ssl_encrypt(plaintext, passwd, algorithm=ALGORITHM, salt=None): if salt is None: salt = Rand.rand_bytes(8) + # pylint: disable=E1101 hashes = [md5(passwd + salt).digest()] for i in range(1, 3): hashes.append(md5(hashes[i - 1] + passwd + salt).digest()) + # pylint: enable=E1101 key = hashes[0] + hashes[1] iv = hashes[2] diff --git a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py index cdd52dbb9..2cdf27ed8 100644 --- a/src/lib/Bcfg2/Server/FileMonitor/Inotify.py +++ b/src/lib/Bcfg2/Server/FileMonitor/Inotify.py @@ -3,14 +3,11 @@ support. 
""" import os import errno -import logging import pyinotify from Bcfg2.Compat import reduce # pylint: disable=W0622 from Bcfg2.Server.FileMonitor import Event from Bcfg2.Server.FileMonitor.Pseudo import Pseudo -LOGGER = logging.getLogger(__name__) - class Inotify(Pseudo, pyinotify.ProcessEvent): """ File monitor backend with `inotify @@ -123,8 +120,9 @@ class Inotify(Pseudo, pyinotify.ProcessEvent): try: watch = self.watchmgr.watches[ievent.wd] except KeyError: - LOGGER.error("Error handling event %s for %s: Watch %s not found" % - (action, ievent.pathname, ievent.wd)) + self.logger.error("Error handling event %s for %s: " + "Watch %s not found" % + (action, ievent.pathname, ievent.wd)) return # FAM-style file monitors return the full path to the parent # directory that is being watched, relative paths to anything diff --git a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py index 24cd099d0..b1e1adab7 100644 --- a/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py +++ b/src/lib/Bcfg2/Server/FileMonitor/Pseudo.py @@ -24,6 +24,6 @@ class Pseudo(FileMonitor): self.events.append(Event(handleID, fname, 'exists')) self.events.append(Event(handleID, path, 'endExist')) - if obj != None: + if obj is not None: self.handles[handleID] = obj return handleID diff --git a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py index 497e8fac6..7a2fd3fe9 100644 --- a/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py +++ b/src/lib/Bcfg2/Server/Lint/RequiredAttrs.py @@ -4,9 +4,14 @@ verified with an XML schema alone""" import os import re import Bcfg2.Server.Lint -import Bcfg2.Client.Tools.POSIX import Bcfg2.Client.Tools.VCS from Bcfg2.Server.Plugins.Packages import Apt, Yum +from Bcfg2.Client.Tools.POSIX.base import device_map +try: + from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile + HAS_GENSHI = True +except ImportError: + HAS_GENSHI = False # format verifying functions @@ -53,10 +58,10 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs) self.required_attrs = dict( Path=dict( - device=dict(name=is_filename, owner=is_username, + device=dict(name=is_filename, + owner=is_username, group=is_username, - dev_type=lambda v: \ - v in Bcfg2.Client.Tools.POSIX.base.device_map), + dev_type=lambda v: v in device_map), directory=dict(name=is_filename, owner=is_username, group=is_username, mode=is_octal_mode), file=dict(name=is_filename, owner=is_username, @@ -81,21 +86,21 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): command=None)}, ACL=dict( default=dict(scope=lambda v: v in ['user', 'group'], - perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3}', + perms=lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v)), access=dict(scope=lambda v: v in ['user', 'group'], - perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3}', + perms=lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v)), - mask=dict(perms=lambda v: re.match('^([0-7]|[rwx\-]{0,3}', + mask=dict(perms=lambda v: re.match(r'^([0-7]|[rwx\-]{0,3}', v))), Package={"__any__": dict(name=None)}, SEBoolean={None: dict(name=None, value=lambda v: v in ['on', 'off'])}, SEModule={None: dict(name=None, __text__=None)}, - SEPort={None: - dict(name=lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', - v), - selinuxtype=is_selinux_type)}, + SEPort={ + None: dict(name=lambda v: re.match(r'^\d+(-\d+)?/(tcp|udp)', + v), + selinuxtype=is_selinux_type)}, SEFcontext={None: dict(name=None, selinuxtype=is_selinux_type)}, SENode={None: dict(name=lambda v: "/" in v, 
selinuxtype=is_selinux_type, @@ -110,8 +115,7 @@ class RequiredAttrs(Bcfg2.Server.Lint.ServerPlugin): SEPermissive={None: dict(name=is_selinux_type)}, POSIXGroup={None: dict(name=is_username)}, POSIXUser={None: dict(name=is_username)}, - MemberOf={None: dict(__text__=is_username)}, - ) + MemberOf={None: dict(__text__=is_username)}) def Run(self): self.check_packages() diff --git a/src/lib/Bcfg2/Server/Lint/Validate.py b/src/lib/Bcfg2/Server/Lint/Validate.py index dd45ac62e..946ef8270 100644 --- a/src/lib/Bcfg2/Server/Lint/Validate.py +++ b/src/lib/Bcfg2/Server/Lint/Validate.py @@ -39,7 +39,8 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): "NagiosGen/config.xml": "nagiosgen.xsd", "FileProbes/config.xml": "fileprobes.xsd", "SSLCA/**/cert.xml": "sslca-cert.xsd", - "SSLCA/**/key.xml": "sslca-key.xsd" + "SSLCA/**/key.xml": "sslca-key.xsd", + "GroupLogic/groups.xml": "grouplogic.xsd" } self.filelists = {} @@ -83,17 +84,15 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): else: self.LintError("properties-schema-not-found", "No schema found for %s" % filename) + # ensure that it at least parses + self.parse(filename) - def validate(self, filename, schemafile, schema=None): - """validate a file against the given lxml.etree.Schema. - return True on success, False on failure """ - if schema is None: - # if no schema object was provided, instantiate one - schema = self._load_schema(schemafile) - if not schema: - return False + def parse(self, filename): + """ Parse an XML file, raising the appropriate LintErrors if + it can't be parsed or read. Return the + lxml.etree._ElementTree parsed from the file. """ try: - datafile = lxml.etree.parse(filename) + return lxml.etree.parse(filename) except SyntaxError: result = self.cmd.run(["xmllint", filename]) self.LintError("xml-failed-to-parse", @@ -105,6 +104,15 @@ class Validate(Bcfg2.Server.Lint.ServerlessPlugin): "Failed to open file %s" % filename) return False + def validate(self, filename, schemafile, schema=None): + """validate a file against the given lxml.etree.Schema. + return True on success, False on failure """ + if schema is None: + # if no schema object was provided, instantiate one + schema = self._load_schema(schemafile) + if not schema: + return False + datafile = self.parse(filename) if not schema.validate(datafile): cmd = ["xmllint"] if self.files is None: diff --git a/src/lib/Bcfg2/Server/Plugin/base.py b/src/lib/Bcfg2/Server/Plugin/base.py index f7bc08717..ecd970b54 100644 --- a/src/lib/Bcfg2/Server/Plugin/base.py +++ b/src/lib/Bcfg2/Server/Plugin/base.py @@ -97,15 +97,21 @@ class Plugin(Debuggable): :param datastore: The path to the Bcfg2 repository on the filesystem :type datastore: string - :raises: :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` + :raises: :exc:`OSError` if adding a file monitor failed; + :class:`Bcfg2.Server.Plugin.exceptions.PluginInitError` + on other errors .. 
autoattribute:: Bcfg2.Server.Plugin.base.Debuggable.__rmi__ """ + Debuggable.__init__(self, name=self.name) self.Entries = {} self.core = core self.data = os.path.join(datastore, self.name) + if not os.path.exists(self.data): + self.logger.warning("%s: %s does not exist, creating" % + (self.name, self.data)) + os.makedirs(self.data) self.running = True - Debuggable.__init__(self, name=self.name) @classmethod def init_repo(cls, repo): @@ -125,5 +131,11 @@ class Plugin(Debuggable): self.debug_log("Shutting down %s plugin" % self.name) self.running = False + def set_debug(self, debug): + for entry in self.Entries.values(): + if isinstance(entry, Debuggable): + entry.set_debug(debug) + return Debuggable.set_debug(self, debug) + def __str__(self): return "%s Plugin" % self.__class__.__name__ diff --git a/src/lib/Bcfg2/Server/Plugin/helpers.py b/src/lib/Bcfg2/Server/Plugin/helpers.py index ded7dd8dc..e5ea5188a 100644 --- a/src/lib/Bcfg2/Server/Plugin/helpers.py +++ b/src/lib/Bcfg2/Server/Plugin/helpers.py @@ -30,7 +30,38 @@ try: except ImportError: HAS_DJANGO = False -LOGGER = logging.getLogger(__name__) +class track_statistics(object): # pylint: disable=C0103 + """ Decorator that tracks execution time for the given + :class:`Plugin` method with :mod:`Bcfg2.Statistics` for reporting + via ``bcfg2-admin perf`` """ + + def __init__(self, name=None): + """ + :param name: The name under which statistics for this function + will be tracked. By default, the name will be + the name of the function concatenated with the + name of the class the function is a member of. + :type name: string + """ + # if this is None, it will be set later during __call_ + self.name = name + + def __call__(self, func): + if self.name is None: + self.name = func.__name__ + + @wraps(func) + def inner(obj, *args, **kwargs): + """ The decorated function """ + name = "%s:%s" % (obj.__class__.__name__, self.name) + + start = time.time() + try: + return func(obj, *args, **kwargs) + finally: + Bcfg2.Statistics.stats.add_value(name, time.time() - start) + + return inner def removecomment(stream): @@ -188,7 +219,7 @@ class FileBacked(Debuggable): return "%s: %s" % (self.__class__.__name__, self.name) -class DirectoryBacked(object): +class DirectoryBacked(Debuggable): """ DirectoryBacked objects represent a directory that contains files, represented by objects of the type listed in :attr:`__child__`, and other directories recursively. It monitors @@ -219,7 +250,7 @@ class DirectoryBacked(object): .. ----- .. 
autoattribute:: __child__ """ - object.__init__(self) + Debuggable.__init__(self) self.data = os.path.normpath(data) self.fam = Bcfg2.Server.FileMonitor.get_fam() @@ -238,11 +269,29 @@ class DirectoryBacked(object): self.handles = {} # Monitor everything in the plugin's directory + if not os.path.exists(self.data): + self.logger.warning("%s does not exist, creating" % self.data) + os.makedirs(self.data) self.add_directory_monitor('') + def set_debug(self, debug): + for entry in self.entries.values(): + if isinstance(entry, Debuggable): + entry.set_debug(debug) + return Debuggable.set_debug(self, debug) + def __getitem__(self, key): return self.entries[key] + def __len__(self): + return len(self.entries) + + def __delitem__(self, key): + del self.entries[key] + + def __setitem__(self, key, val): + self.entries[key] = val + def __iter__(self): return iter(list(self.entries.items())) @@ -259,7 +308,7 @@ class DirectoryBacked(object): dirpathname = os.path.join(self.data, relative) if relative not in self.handles.values(): if not os.path.isdir(dirpathname): - LOGGER.error("%s is not a directory" % dirpathname) + self.logger.error("%s is not a directory" % dirpathname) return reqid = self.fam.AddMonitor(dirpathname, self) self.handles[reqid] = relative @@ -303,8 +352,8 @@ class DirectoryBacked(object): return if event.requestID not in self.handles: - LOGGER.warn("Got %s event with unknown handle (%s) for %s" % - (action, event.requestID, event.filename)) + self.logger.warn("Got %s event with unknown handle (%s) for %s" % + (action, event.requestID, event.filename)) return # Clean up path names @@ -314,7 +363,7 @@ class DirectoryBacked(object): event.filename = event.filename[len(self.data) + 1:] if self.ignore and self.ignore.search(event.filename): - LOGGER.debug("Ignoring event %s" % event.filename) + self.logger.debug("Ignoring event %s" % event.filename) return # Calculate the absolute and relative paths this event refers to @@ -349,19 +398,20 @@ class DirectoryBacked(object): # class doesn't support canceling, so at least let # the user know that a restart might be a good # idea. - LOGGER.warn("Directory properties for %s changed, please " - " consider restarting the server" % abspath) + self.logger.warn("Directory properties for %s changed, " + "please consider restarting the server" % + abspath) else: # Got a "changed" event for a directory that we # didn't know about. Go ahead and treat it like a # "created" event, but log a warning, because this # is unexpected. - LOGGER.warn("Got %s event for unexpected dir %s" % - (action, abspath)) + self.logger.warn("Got %s event for unexpected dir %s" % + (action, abspath)) self.add_directory_monitor(relpath) else: - LOGGER.warn("Got unknown dir event %s %s %s" % - (event.requestID, event.code2str(), abspath)) + self.logger.warn("Got unknown dir event %s %s %s" % + (event.requestID, event.code2str(), abspath)) elif self.patterns.search(event.filename): if action in ['exists', 'created']: self.add_entry(relpath, event) @@ -373,16 +423,15 @@ class DirectoryBacked(object): # know about. Go ahead and treat it like a # "created" event, but log a warning, because this # is unexpected. 
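The ``track_statistics`` decorator introduced in the ``helpers.py`` hunk above wraps a plugin method, timing each call and recording it in ``Bcfg2.Statistics`` under ``ClassName:method`` for reporting via ``bcfg2-admin perf``. A hypothetical use; ``MyPlugin`` and its trivial ``BuildStructures`` exist only for illustration::

    import Bcfg2.Server.Plugin
    from Bcfg2.Server.Plugin import track_statistics

    class MyPlugin(Bcfg2.Server.Plugin.Plugin,
                   Bcfg2.Server.Plugin.Structure):
        """ Illustrative plugin, not shipped with Bcfg2 """

        @track_statistics()
        def BuildStructures(self, metadata):
            # each call is timed and recorded as
            # "MyPlugin:BuildStructures"
            return []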
- LOGGER.warn("Got %s event for unexpected file %s" % - (action, - abspath)) + self.logger.warn("Got %s event for unexpected file %s" % + (action, abspath)) self.add_entry(relpath, event) else: - LOGGER.warn("Got unknown file event %s %s %s" % - (event.requestID, event.code2str(), abspath)) + self.logger.warn("Got unknown file event %s %s %s" % + (event.requestID, event.code2str(), abspath)) else: - LOGGER.warn("Could not process filename %s; ignoring" % - event.filename) + self.logger.warn("Could not process filename %s; ignoring" % + event.filename) class XMLFileBacked(FileBacked): @@ -397,7 +446,11 @@ class XMLFileBacked(FileBacked): #: behavior, set ``__identifier__`` to ``None``. __identifier__ = 'name' - def __init__(self, filename, should_monitor=False): + #: If ``create`` is set, then it overrides the ``create`` argument + #: to the constructor. + create = None + + def __init__(self, filename, should_monitor=False, create=None): """ :param filename: The full path to the file to cache and monitor :type filename: string @@ -409,6 +462,13 @@ class XMLFileBacked(FileBacked): :class:`Bcfg2.Server.Plugin.helpers.XMLDirectoryBacked` object). :type should_monitor: bool + :param create: Create the file if it doesn't exist. + ``create`` can be either an + :class:`lxml.etree._Element` object, which will + be used as initial content, or a string, which + will be used as the name of the (empty) tag + that will be the initial content of the file. + :type create: lxml.etree._Element or string .. ----- .. autoattribute:: __identifier__ @@ -432,6 +492,21 @@ class XMLFileBacked(FileBacked): #: "Extra" files included in this file by XInclude. self.extras = [] + #: Extra FAM monitors set by this object for files included by + #: XInclude. + self.extra_monitors = [] + + if ((create is not None or self.create not in [None, False]) and + not os.path.exists(self.name)): + toptag = create or self.create + self.logger.warning("%s does not exist, creating" % self.name) + if hasattr(toptag, "getroottree"): + el = toptag + else: + el = lxml.etree.Element(toptag) + el.getroottree().write(self.name, xml_declaration=False, + pretty_print=True) + #: Whether or not to monitor this file for changes. self.should_monitor = should_monitor if should_monitor: @@ -471,9 +546,11 @@ class XMLFileBacked(FileBacked): parent.remove(el) for extra in extras: if extra != self.name and extra not in self.extras: - self.add_monitor(extra) + self.extras.append(extra) lxml.etree.SubElement(parent, xinclude, href=extra) self._follow_xincludes(fname=extra) + if extra not in self.extra_monitors: + self.add_monitor(extra) def Index(self): self.xdata = lxml.etree.XML(self.data, base_url=self.name, @@ -502,7 +579,7 @@ class XMLFileBacked(FileBacked): :type fpath: string :returns: None """ - self.extras.append(fpath) + self.extra_monitors.append(fpath) if self.should_monitor: self.fam.AddMonitor(fpath, self) @@ -837,8 +914,8 @@ class XMLSrc(XMLFileBacked): __cacheobj__ = dict __priority_required__ = True - def __init__(self, filename, should_monitor=False): - XMLFileBacked.__init__(self, filename, should_monitor) + def __init__(self, filename, should_monitor=False, create=None): + XMLFileBacked.__init__(self, filename, should_monitor, create) self.items = {} self.cache = None self.pnode = None @@ -938,7 +1015,7 @@ class XMLDirectoryBacked(DirectoryBacked): #: Only track and include files whose names (not paths) match this #: compiled regex. 
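Several of the plugins touched in this merge (FileProbes, GroupPatterns, GroupLogic, Metadata) now pass a ``create`` value so that a missing XML file is written out with an empty root element instead of producing an error, per the ``XMLFileBacked`` changes above. A standalone sketch of just that creation step, using the same lxml calls as the diff; ``ensure_xml_file`` is an illustrative name::

    import os
    import lxml.etree

    def ensure_xml_file(name, create):
        """ If ``name`` does not exist, write an empty document whose
        root is ``create`` -- either a tag name or a prebuilt
        lxml.etree element -- mirroring the new XMLFileBacked
        behaviour. """
        if not os.path.exists(name):
            if hasattr(create, "getroottree"):
                el = create
            else:
                el = lxml.etree.Element(create)
            el.getroottree().write(name, xml_declaration=False,
                                   pretty_print=True)

    ensure_xml_file("/tmp/config.xml", "FileProbes")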
- patterns = re.compile('^.*\.xml$') + patterns = re.compile(r'^.*\.xml$') #: The type of child objects to create for files contained within #: the directory that is tracked. Default is @@ -1195,7 +1272,7 @@ class EntrySet(Debuggable): #: file is encountered that does not match the ``basename`` #: argument passed to the constructor or ``ignore``, then a #: warning will be produced. - ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\\.genshi_include)$") + ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.genshi_include)$') # The ``basename`` argument passed to the constructor will be #: processed as a string that contains a regular expression (i.e., @@ -1258,8 +1335,8 @@ class EntrySet(Debuggable): base_pat = basename else: base_pat = re.escape(basename) - pattern = '(.*/)?%s(\.((H_(?P<hostname>\S+))|' % base_pat - pattern += '(G(?P<prio>\d+)_(?P<group>\S+))))?$' + pattern = r'(.*/)?' + base_pat + \ + r'(\.((H_(?P<hostname>\S+))|(G(?P<prio>\d+)_(?P<group>\S+))))?$' #: ``specific`` is a regular expression that is used to #: determine the specificity of a file in this entry set. It @@ -1520,8 +1597,6 @@ class GroupSpool(Plugin, Generator): def __init__(self, core, datastore): Plugin.__init__(self, core, datastore) Generator.__init__(self) - if self.data[-1] == '/': - self.data = self.data[:-1] self.fam = Bcfg2.Server.FileMonitor.get_fam() diff --git a/src/lib/Bcfg2/Server/Plugin/interfaces.py b/src/lib/Bcfg2/Server/Plugin/interfaces.py index 11a61ff9c..d460cc45d 100644 --- a/src/lib/Bcfg2/Server/Plugin/interfaces.py +++ b/src/lib/Bcfg2/Server/Plugin/interfaces.py @@ -337,12 +337,11 @@ class ThreadedStatistics(Statistics, Threaded, threading.Thread): pending_data = [] try: while not self.work_queue.empty(): - (metadata, data) = self.work_queue.get_nowait() - pending_data.append( - (metadata.hostname, - lxml.etree.tostring( - data, - xml_declaration=False).decode("UTF-8"))) + (metadata, xdata) = self.work_queue.get_nowait() + data = \ + lxml.etree.tostring(xdata, + xml_declaration=False).decode("UTF-8") + pending_data.append((metadata.hostname, data)) except Empty: pass @@ -409,7 +408,7 @@ class ThreadedStatistics(Statistics, Threaded, threading.Thread): def run(self): if not self._load(): return - while not self.terminate.isSet() and self.work_queue != None: + while not self.terminate.isSet() and self.work_queue is not None: try: (client, xdata) = self.work_queue.get(block=True, timeout=2) except Empty: @@ -419,7 +418,7 @@ class ThreadedStatistics(Statistics, Threaded, threading.Thread): self.logger.error("ThreadedStatistics: %s" % err) continue self.handle_statistic(client, xdata) - if self.work_queue != None and not self.work_queue.empty(): + if self.work_queue is not None and not self.work_queue.empty(): self._save() def process_statistics(self, metadata, data): diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py index b3781e299..e056c871a 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgGenshiGenerator.py @@ -68,7 +68,7 @@ class CfgGenshiGenerator(CfgGenerator): #: exception in a Genshi template so we can provide a decent error #: message that actually tells the end user where an error #: occurred. 
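The ``EntrySet`` pattern rebuilt above encodes specificity in file names: ``<base>.H_<hostname>`` for host-specific files and ``<base>.G<prio>_<group>`` for group-specific ones, with an unsuffixed name applying to all hosts. A quick standalone run of the same regex; the base name and sample file names are made up::

    import re

    base_pat = re.escape("motd")
    specific = re.compile(
        r'(.*/)?' + base_pat +
        r'(\.((H_(?P<hostname>\S+))|(G(?P<prio>\d+)_(?P<group>\S+))))?$')

    for fname in ["motd", "motd.H_web01.example.com", "motd.G10_webserver"]:
        match = specific.match(fname)
        print("%s -> hostname=%s prio=%s group=%s" %
              (fname, match.group("hostname"), match.group("prio"),
               match.group("group")))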
- pyerror_re = re.compile('<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>') + pyerror_re = re.compile(r'<\w+ u?[\'"](.*?)\s*\.\.\.[\'"]>') def __init__(self, fname, spec, encoding): CfgGenerator.__init__(self, fname, spec, encoding) diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py index 7ebce192c..735f23a1c 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/CfgPrivateKeyCreator.py @@ -9,7 +9,7 @@ from Bcfg2.Server.Plugin import StructFile from Bcfg2.Server.Plugins.Cfg import CfgCreator, CfgCreationError from Bcfg2.Server.Plugins.Cfg.CfgPublicKeyCreator import CfgPublicKeyCreator try: - from Bcfg2.Server.Encryption import get_passphrases, ssl_encrypt + import Bcfg2.Server.Encryption HAS_CRYPTO = True except ImportError: HAS_CRYPTO = False @@ -51,8 +51,8 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile): if (HAS_CRYPTO and self.setup.cfp.has_section("sshkeys") and self.setup.cfp.has_option("sshkeys", "passphrase")): - return get_passphrases()[self.setup.cfp.get("sshkeys", - "passphrase")] + return Bcfg2.Encrypption.get_passphrases()[ + self.setup.cfp.get("sshkeys", "passphrase")] return None def handle_event(self, event): @@ -72,7 +72,7 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile): the given client metadata, and may be obtained by doing ``self.XMLMatch(metadata)`` :type spec: lxml.etree._Element - :returns: None + :returns: string - The filename of the private key """ if spec is None: spec = self.XMLMatch(metadata) @@ -142,7 +142,6 @@ class CfgPrivateKeyCreator(CfgCreator, StructFile): if spec is None: spec = self.XMLMatch(metadata) category = spec.get("category", self.category) - print("category=%s" % category) if category is None: per_host_default = "true" else: diff --git a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py index 2301de725..3e464af49 100644 --- a/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py +++ b/src/lib/Bcfg2/Server/Plugins/Cfg/__init__.py @@ -77,7 +77,7 @@ class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData, self.encoding = encoding self.setup = Bcfg2.Options.get_option_parser() __init__.__doc__ = Bcfg2.Server.Plugin.SpecificData.__init__.__doc__ + \ -""" + """ .. ----- .. autoattribute:: CfgBaseFileMatcher.__basenames__ .. 
autoattribute:: CfgBaseFileMatcher.__extensions__ @@ -101,12 +101,12 @@ class CfgBaseFileMatcher(Bcfg2.Server.Plugin.SpecificData, components = ['^(?P<basename>%s)' % '|'.join(re.escape(b) for b in basenames)] if cls.__specific__: - components.append('(|\\.H_(?P<hostname>\S+?)|' + - '\.G(?P<prio>\d+)_(?P<group>\S+?))') + components.append(r'(|\.H_(?P<hostname>\S+?)|' + + r'\.G(?P<prio>\d+)_(?P<group>\S+?))') if cls.__extensions__: - components.append('\\.(?P<extension>%s)' % - '|'.join(cls.__extensions__)) - components.append('$') + components.append(r'\.(?P<extension>%s)' % + r'|'.join(cls.__extensions__)) + components.append(r'$') return re.compile("".join(components)) @classmethod @@ -563,6 +563,8 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet, else: try: if not isinstance(data, unicode): + if not isinstance(data, str): + data = data.decode('utf-8') data = u_str(data, self.encoding) except UnicodeDecodeError: msg = "Failed to decode %s: %s" % (entry.get('name'), @@ -715,8 +717,8 @@ class CfgEntrySet(Bcfg2.Server.Plugin.EntrySet, pass if not rv or not rv[0].hostname: - rv.append(Bcfg2.Server.Plugin.Specificity( - hostname=metadata.hostname)) + rv.append( + Bcfg2.Server.Plugin.Specificity(hostname=metadata.hostname)) return rv def build_filename(self, specific): diff --git a/src/lib/Bcfg2/Server/Plugins/FileProbes.py b/src/lib/Bcfg2/Server/Plugins/FileProbes.py index 33914bd45..316e4bc53 100644 --- a/src/lib/Bcfg2/Server/Plugins/FileProbes.py +++ b/src/lib/Bcfg2/Server/Plugins/FileProbes.py @@ -67,7 +67,8 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, self.config = \ Bcfg2.Server.Plugin.StructFile(os.path.join(self.data, 'config.xml'), - should_monitor=True) + should_monitor=True, + create=self.name) self.entries = dict() self.probes = dict() @@ -225,11 +226,8 @@ class FileProbes(Bcfg2.Server.Plugin.Plugin, root = lxml.etree.Element("FileInfo") root.append(info) try: - open(infoxml, - "w").write( - lxml.etree.tostring(root, - xml_declaration=False, - pretty_print=True).decode('UTF-8')) + root.getroottree().write(infoxml, xml_declaration=False, + pretty_print=True) except IOError: err = sys.exc_info()[1] self.logger.error("Could not write %s: %s" % (infoxml, err)) diff --git a/src/lib/Bcfg2/Server/Plugins/Git.py b/src/lib/Bcfg2/Server/Plugins/Git.py index 781413e1a..58a5c58f0 100644 --- a/src/lib/Bcfg2/Server/Plugins/Git.py +++ b/src/lib/Bcfg2/Server/Plugins/Git.py @@ -46,7 +46,7 @@ class Git(Version): else: cmd = ["git", "--git-dir", self.vcs_path, "--work-tree", self.vcs_root, "rev-parse", "HEAD"] - self.debug_log("Git: Running cmd") + self.debug_log("Git: Running %s" % cmd) result = self.cmd.run(cmd) if not result.success: raise Exception(result.stderr) diff --git a/src/lib/Bcfg2/Server/Plugins/GroupLogic.py b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py new file mode 100644 index 000000000..810b273af --- /dev/null +++ b/src/lib/Bcfg2/Server/Plugins/GroupLogic.py @@ -0,0 +1,47 @@ +""" GroupLogic is a connector plugin that lets you use an XML Genshi +template to dynamically set additional groups for clients. """ + +import os +import lxml.etree +import Bcfg2.Server.Plugin +try: + from Bcfg2.Server.Plugins.Bundler import BundleTemplateFile +except ImportError: + # BundleTemplateFile missing means that genshi is missing. 
we + # import genshi to get the _real_ error + import genshi # pylint: disable=W0611 + + +class GroupLogicConfig(BundleTemplateFile): + """ Representation of the GroupLogic groups.xml file """ + create = lxml.etree.Element("GroupLogic", + nsmap=dict(py="http://genshi.edgewall.org/")) + + def __init__(self, name, fam): + BundleTemplateFile.__init__(self, name, + Bcfg2.Server.Plugin.Specificity(), None) + self.fam = fam + self.should_monitor = True + self.fam.AddMonitor(self.name, self) + + def _match(self, item, metadata): + if item.tag == 'Group' and not len(item.getchildren()): + return [item] + return BundleTemplateFile._match(self, item, metadata) + + +class GroupLogic(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector): + """ GroupLogic is a connector plugin that lets you use an XML + Genshi template to dynamically set additional groups for + clients. """ + + def __init__(self, core, datastore): + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + self.config = GroupLogicConfig(os.path.join(self.data, "groups.xml"), + core.fam) + + def get_additional_groups(self, metadata): + return [el.get("name") + for el in self.config.get_xml_value(metadata).findall("Group")] diff --git a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py index 99f01201b..9042a979e 100644 --- a/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py +++ b/src/lib/Bcfg2/Server/Plugins/GroupPatterns.py @@ -3,7 +3,6 @@ import os import re import sys -import logging import Bcfg2.Server.Lint import Bcfg2.Server.Plugin from Bcfg2.Utils import PackedDigitRange @@ -16,16 +15,16 @@ class PatternMap(object): self.pattern = pattern self.rangestr = rangestr self.groups = groups - if pattern != None: + if pattern is not None: self.re = re.compile(pattern) self.process = self.process_re - elif rangestr != None: + elif rangestr is not None: if '\\' in rangestr: raise Exception("Backslashes are not allowed in NameRanges") range_finder = r'\[\[[\d\-,]+\]\]' self.process = self.process_range - self.re = re.compile('^' + re.sub(range_finder, '(\d+)', - rangestr)) + self.re = re.compile(r'^' + re.sub(range_finder, r'(\d+)', + rangestr)) dmatcher = re.compile(re.sub(range_finder, r'\[\[([\d\-,]+)\]\]', rangestr)) @@ -67,13 +66,13 @@ class PatternMap(object): class PatternFile(Bcfg2.Server.Plugin.XMLFileBacked): """ representation of GroupPatterns config.xml """ __identifier__ = None + create = 'GroupPatterns' def __init__(self, filename, core=None): Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, filename, should_monitor=True) self.core = core self.patterns = [] - self.logger = logging.getLogger(self.__class__.__name__) def Index(self): Bcfg2.Server.Plugin.XMLFileBacked.Index(self) diff --git a/src/lib/Bcfg2/Server/Plugins/Metadata.py b/src/lib/Bcfg2/Server/Plugins/Metadata.py index 7f8db7b6d..49e36f72b 100644 --- a/src/lib/Bcfg2/Server/Plugins/Metadata.py +++ b/src/lib/Bcfg2/Server/Plugins/Metadata.py @@ -75,6 +75,7 @@ if HAS_DJANGO: yield client.hostname def keys(self): + """ Get keys for the mapping """ return [c.hostname for c in MetadataClientModel.objects.all()] def __contains__(self, key): @@ -94,8 +95,10 @@ class XMLMetadataConfig(Bcfg2.Server.Plugin.XMLFileBacked): # then we immediately set should_monitor to the proper value, # so that XInclude'd files get properly watched fpath = os.path.join(metadata.data, basefile) + toptag = os.path.splitext(basefile)[0].title() Bcfg2.Server.Plugin.XMLFileBacked.__init__(self, fpath, - 
should_monitor=False) + should_monitor=False, + create=toptag) self.should_monitor = watch_clients self.metadata = metadata self.basefile = basefile @@ -325,6 +328,11 @@ class ClientMetadata(object): return grp return '' + def __repr__(self): + return "%s(%s, profile=%s, groups=%s)" % (self.__class__.__name__, + self.hostname, + self.profile, self.groups) + class MetadataQuery(object): """ This class provides query methods for the metadata of all @@ -438,7 +446,7 @@ class MetadataQuery(object): return [self.by_name(name) for name in self.all_clients()] -class MetadataGroup(tuple): +class MetadataGroup(tuple): # pylint: disable=E0012,R0924 """ representation of a metadata group. basically just a named tuple """ # pylint: disable=R0913,W0613 @@ -595,7 +603,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, def _add_xdata(self, config, tag, name, attribs=None, alias=False): """ Generic method to add XML data (group, client, etc.) """ node = self._search_xdata(tag, name, config.xdata, alias=alias) - if node != None: + if node is not None: raise Bcfg2.Server.Plugin.MetadataConsistencyError("%s \"%s\" " "already exists" % (tag, name)) @@ -655,7 +663,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, def _update_xdata(self, config, tag, name, attribs, alias=False): """ Generic method to modify XML data (group, client, etc.) """ node = self._search_xdata(tag, name, config.xdata, alias=alias) - if node == None: + if node is None: self.logger.error("%s \"%s\" does not exist" % (tag, name)) raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % @@ -672,7 +680,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, """Update a groups attributes.""" if self._use_db: msg = "Metadata does not support updating groups with " + \ - "use_database enabled" + "use_database enabled" self.logger.error(msg) raise Bcfg2.Server.Plugin.PluginExecutionError(msg) else: @@ -700,7 +708,7 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, def _remove_xdata(self, config, tag, name): """ Generic method to remove XML data (group, client, etc.) 
""" node = self._search_xdata(tag, name, config.xdata) - if node == None: + if node is None: self.logger.error("%s \"%s\" does not exist" % (tag, name)) raise Bcfg2.Server.Plugin.MetadataConsistencyError xdict = config.find_xml_for_xpath('.//%s[@name="%s"]' % @@ -936,16 +944,11 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, if group not in self.groups: self.debug_log("Client %s set as nonexistent group %s" % (client, group)) - for gname, ginfo in list(self.groups.items()): - for group in ginfo.groups: - if group not in self.groups: - self.debug_log("Group %s set as nonexistent group %s" % - (gname, group)) def set_profile(self, client, profile, addresspair): """Set group parameter for provided client.""" - self.logger.info("Asserting client %s profile to %s" % - (client, profile)) + self.logger.info("Asserting client %s profile to %s" % (client, + profile)) if False in list(self.states.values()): raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not " "been read yet") @@ -996,19 +999,18 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, self.clients_xml.write() def set_version(self, client, version): - """Set group parameter for provided client.""" - if client in self.clients: - if client not in self.versions or version != self.versions[client]: - self.logger.info("Setting client %s version to %s" % - (client, version)) - if not self._use_db: - self.update_client(client, dict(version=version)) - self.clients_xml.write() - self.versions[client] = version - else: - msg = "Cannot set version on non-existent client %s" % client - self.logger.error(msg) - raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) + """Set version for provided client.""" + if client not in self.clients: + # this creates the client as a side effect + self.get_initial_metadata(client) + + if client not in self.versions or version != self.versions[client]: + self.logger.info("Setting client %s version to %s" % (client, + version)) + if not self._use_db: + self.update_client(client, dict(version=version)) + self.clients_xml.write() + self.versions[client] = version def resolve_client(self, addresspair, cleanup_cache=False): """Lookup address locally or in DNS to get a hostname.""" @@ -1085,7 +1087,6 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not " "been read yet") client = client.lower() - if client in self.core.metadata_cache: return self.core.metadata_cache[client] @@ -1096,6 +1097,29 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, categories = dict() profile = None + def _add_group(grpname): + """ Add a group to the set of groups for this client. + Handles setting categories and category suppression. + Returns the new profile for the client (which might be + unchanged). 
""" + groups.add(grpname) + if grpname in self.groups: + group = self.groups[grpname] + category = group.category + if category: + if category in categories: + self.logger.warning("%s: Group %s suppressed by " + "category %s; %s already a member " + "of %s" % + (self.name, grpname, category, + client, categories[category])) + return + categories[category] = grpname + if not profile and group.is_profile: + return grpname + else: + return profile + if client not in self.clients: pgroup = None if client in self.clientgroups: @@ -1105,41 +1129,28 @@ class Metadata(Bcfg2.Server.Plugin.Metadata, if pgroup: self.set_profile(client, pgroup, (None, None)) - groups.add(pgroup) - category = self.groups[pgroup].category - if category: - categories[category] = pgroup - if (pgroup in self.groups and self.groups[pgroup].is_profile): - profile = pgroup + profile = _add_group(pgroup) else: msg = "Cannot add new client %s; no default group set" % client self.logger.error(msg) raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg) - if client in self.clientgroups: - for cgroup in self.clientgroups[client]: - if cgroup in groups: - continue - if cgroup not in self.groups: - self.groups[cgroup] = MetadataGroup(cgroup) - category = self.groups[cgroup].category - if category and category in categories: - self.logger.warning("%s: Group %s suppressed by " - "category %s; %s already a member " - "of %s" % - (self.name, cgroup, category, - client, categories[category])) - continue - if category: - categories[category] = cgroup - groups.add(cgroup) - # favor client groups for setting profile - if not profile and self.groups[cgroup].is_profile: - profile = cgroup + for cgroup in self.clientgroups.get(client, []): + if cgroup in groups: + continue + if cgroup not in self.groups: + self.groups[cgroup] = MetadataGroup(cgroup) + profile = _add_group(cgroup) groups, categories = self._merge_groups(client, groups, categories=categories) + if len(groups) == 0 and self.default: + # no initial groups; add the default profile + profile = _add_group(self.default) + groups, categories = self._merge_groups(client, groups, + categories=categories) + bundles = set() for group in groups: try: @@ -1475,6 +1486,7 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): self.duplicate_groups() self.duplicate_default_groups() self.duplicate_clients() + self.default_is_profile() @classmethod def Errors(cls): @@ -1484,11 +1496,15 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): "non-profile-set-as-profile": "error", "duplicate-group": "error", "duplicate-client": "error", - "multiple-default-groups": "error"} + "multiple-default-groups": "error", + "default-is-not-profile": "error"} def deprecated_options(self): """ check for the location='floating' option, which has been deprecated in favor of floating='true' """ + if not hasattr(self.metadata, "clients_xml"): + # using metadata database + return clientdata = self.metadata.clients_xml.xdata for el in clientdata.xpath("//Client"): loc = el.get("location") @@ -1514,6 +1530,9 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): def bogus_profiles(self): """ check for clients that have profiles that are either not flagged as public groups in groups.xml, or don't exist """ + if not hasattr(self.metadata, "clients_xml"): + # using metadata database + return for client in self.metadata.clients_xml.xdata.findall('.//Client'): profile = client.get("profile") if profile not in self.metadata.groups: @@ -1533,8 +1552,8 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): tag as a definition 
if it a) has profile or public set; or b) has any children. """ self.duplicate_entries( - self.metadata.groups_xml.xdata.xpath("//Groups/Group") + \ - self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"), + self.metadata.groups_xml.xdata.xpath("//Groups/Group") + + self.metadata.groups_xml.xdata.xpath("//Groups/Group//Group"), "group", include=lambda g: (g.get("profile") or g.get("public") or @@ -1554,6 +1573,9 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): def duplicate_clients(self): """ check for clients that are defined twice. """ + if not hasattr(self.metadata, "clients_xml"): + # using metadata database + return self.duplicate_entries( self.metadata.clients_xml.xdata.xpath("//Client"), "client") @@ -1574,3 +1596,14 @@ class MetadataLint(Bcfg2.Server.Lint.ServerPlugin): self.LintError("duplicate-%s" % etype, "%s %s is defined multiple times:\n%s" % (etype.title(), ename, "\n".join(els))) + + def default_is_profile(self): + """ ensure that the default group is a profile group """ + if (self.metadata.default and + not self.metadata.groups[self.metadata.default].is_profile): + xdata = \ + self.metadata.groups_xml.xdata.xpath("//Group[@name='%s']" % + self.metadata.default)[0] + self.LintError("default-is-not-profile", + "Default group is not a profile group:\n%s" % + self.RenderXML(xdata)) diff --git a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py index d5ea0cb24..9603cd518 100644 --- a/src/lib/Bcfg2/Server/Plugins/NagiosGen.py +++ b/src/lib/Bcfg2/Server/Plugins/NagiosGen.py @@ -5,23 +5,22 @@ import re import sys import glob import socket -import Bcfg2.Server.Plugin +from Bcfg2.Server.Plugin import Plugin, Generator, StructFile, \ + PluginExecutionError -class NagiosGen(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Generator): +class NagiosGen(Plugin, Generator): """ NagiosGen is a Bcfg2 plugin that dynamically generates Nagios configuration file based on Bcfg2 data. 
""" __author__ = 'bcfg-dev@mcs.anl.gov' line_fmt = '\t%-32s %s' def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) - Bcfg2.Server.Plugin.Generator.__init__(self) + Plugin.__init__(self, core, datastore) + Generator.__init__(self) self.config = \ - Bcfg2.Server.Plugin.StructFile(os.path.join(self.data, - 'config.xml'), - should_monitor=True) + StructFile(os.path.join(self.data, 'config.xml'), + should_monitor=True, create=self.name) self.Entries = {'Path': {'/etc/nagiosgen.status': self.createhostconfig, '/etc/nagios/nagiosgen.cfg': self.createserverconfig}} @@ -42,9 +41,9 @@ class NagiosGen(Bcfg2.Server.Plugin.Plugin, try: host_address = socket.gethostbyname(metadata.hostname) except socket.gaierror: - self.logger.error("Failed to find IP address for %s" % - metadata.hostname) - raise Bcfg2.Server.Plugin.PluginExecutionError + self.logger.error() + raise PluginExecutionError("Failed to find IP address for %s" % + metadata.hostname) host_groups = [grp for grp in metadata.groups if os.path.isfile('%s/%s-group.cfg' % (self.data, grp))] host_config = ['define host {', diff --git a/src/lib/Bcfg2/Server/Plugins/Ohai.py b/src/lib/Bcfg2/Server/Plugins/Ohai.py index 8b73f0ba7..18261be10 100644 --- a/src/lib/Bcfg2/Server/Plugins/Ohai.py +++ b/src/lib/Bcfg2/Server/Plugins/Ohai.py @@ -2,8 +2,10 @@ operating system using ohai (http://wiki.opscode.com/display/chef/Ohai) """ -import lxml.etree import os +import sys +import glob +import lxml.etree import Bcfg2.Server.Plugin try: @@ -31,22 +33,39 @@ class OhaiCache(object): self.dirname = dirname self.cache = dict() + def hostpath(self, host): + """ Get the path to the file that contains Ohai data for the + given host """ + return os.path.join(self.dirname, "%s.json" % host) + def __setitem__(self, item, value): - if value == None: + if value is None: # simply return if the client returned nothing return self.cache[item] = json.loads(value) - open("%s/%s.json" % (self.dirname, item), 'w').write(value) + open(self.hostpath(item), 'w').write(value) def __getitem__(self, item): if item not in self.cache: try: - data = open("%s/%s.json" % (self.dirname, item)).read() + data = open(self.hostpath(item)).read() except: raise KeyError(item) self.cache[item] = json.loads(data) return self.cache[item] + def __delitem__(self, item): + if item in self.cache: + del self.cache[item] + try: + os.unlink(self.hostpath(item)) + except: + raise IndexError("Could not unlink %s: %s" % (self.hostpath(item), + sys.exc_info()[1])) + + def __len__(self): + return len(glob.glob(self.hostpath('*'))) + def __iter__(self): data = list(self.cache.keys()) data.extend([x[:-5] for x in os.listdir(self.dirname)]) @@ -67,10 +86,6 @@ class Ohai(Bcfg2.Server.Plugin.Plugin, self.probe = lxml.etree.Element('probe', name='Ohai', source='Ohai', interpreter='/bin/sh') self.probe.text = PROBECODE - try: - os.stat(self.data) - except OSError: - os.makedirs(self.data) self.cache = OhaiCache(self.data) def GetProbes(self, _): diff --git a/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py b/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py index 490ee6f20..1736becc7 100644 --- a/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py +++ b/src/lib/Bcfg2/Server/Plugins/POSIXCompat.py @@ -15,7 +15,7 @@ class POSIXCompat(Bcfg2.Server.Plugin.Plugin, def validate_goals(self, metadata, goals): """Verify that we are generating correct old POSIX entries.""" - if metadata.version_info and metadata.version_info > (1, 3, 0, '', 0): + if metadata.version_info and metadata.version_info >= (1, 
3, 0, '', 0): # do not care about a client that is _any_ 1.3.0 release # (including prereleases and RCs) return diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py index 57f802bb5..48c580be1 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Apt.py @@ -29,8 +29,8 @@ class AptCollection(Collection): """ Get an APT configuration file (i.e., ``sources.list``). :returns: string """ - lines = ["# This config was generated automatically by the Bcfg2 " \ - "Packages plugin", ''] + lines = ["# This config was generated automatically by the Bcfg2 " + "Packages plugin", ''] for source in self: if source.rawurl: @@ -88,6 +88,8 @@ class AptSource(Source): self.logger.error("Packages: Failed to read file %s" % fname) raise for line in reader.readlines(): + if not isinstance(line, str): + line = line.decode('utf-8') words = str(line.strip()).split(':', 1) if words[0] == 'Package': pkgname = words[1].strip().rstrip() @@ -99,8 +101,8 @@ class AptSource(Source): vindex = 0 for dep in words[1].split(','): if '|' in dep: - cdeps = [re.sub('\s+', '', - re.sub('\(.*\)', '', cdep)) + cdeps = [re.sub(r'\s+', '', + re.sub(r'\(.*\)', '', cdep)) for cdep in dep.split('|')] dyn_dname = "choice-%s-%s-%s" % (pkgname, barch, @@ -109,7 +111,7 @@ class AptSource(Source): bdeps[barch][pkgname].append(dyn_dname) bprov[barch][dyn_dname] = set(cdeps) else: - raw_dep = re.sub('\(.*\)', '', dep) + raw_dep = re.sub(r'\(.*\)', '', dep) raw_dep = raw_dep.rstrip().strip() bdeps[barch][pkgname].append(raw_dep) elif words[0] == 'Provides': diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py index e9744c777..aa6127f57 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/PackagesSources.py @@ -9,6 +9,7 @@ from Bcfg2.Server.Statistics import track_statistics from Bcfg2.Server.Plugins.Packages.Source import SourceInitError +# pylint: disable=E0012,R0924 class PackagesSources(Bcfg2.Server.Plugin.StructFile, Bcfg2.Server.Plugin.Debuggable): """ PackagesSources handles parsing of the @@ -17,6 +18,9 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile, :class:`Bcfg2.Server.Plugins.Packages.Source.Source` object for each ``Source`` tag. """ + __identifier__ = None + create = "Sources" + def __init__(self, filename, cachepath, packages): """ :param filename: The full path to ``sources.xml`` @@ -34,14 +38,8 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile, If ``sources.xml`` cannot be read """ Bcfg2.Server.Plugin.Debuggable.__init__(self) - try: - Bcfg2.Server.Plugin.StructFile.__init__(self, filename, - should_monitor=True) - except OSError: - err = sys.exc_info()[1] - msg = "Packages: Failed to read configuration file: %s" % err - self.logger.error(msg) - raise Bcfg2.Server.Plugin.PluginInitError(msg) + Bcfg2.Server.Plugin.StructFile.__init__(self, filename, + should_monitor=True) #: The full path to the directory where #: :class:`Bcfg2.Server.Plugins.Packages.Source.Source` data @@ -124,7 +122,7 @@ class PackagesSources(Bcfg2.Server.Plugin.StructFile, """ Create a :class:`Bcfg2.Server.Plugins.Packages.Source.Source` subclass object from XML representation of a source in ``sources.xml``. - ``source_from-xml`` determines the appropriate subclass of + ``source_from_xml`` determines the appropriate subclass of ``Source`` to instantiate according to the ``type`` attribute of the ``Source`` tag. 
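
The ``source_from_xml`` dispatch described above amounts to looking up a ``Source`` subclass by the ``type`` attribute of each ``Source`` tag and instantiating it. A minimal sketch of that pattern, using hypothetical stand-in classes and a hypothetical registry (not the plugin's real Apt/Yum ``Source`` subclasses or its error handling), might look like:

    import lxml.etree

    class ExampleAptSource(object):
        """ Hypothetical stand-in for an apt-style Source subclass. """
        def __init__(self, xsource):
            self.url = xsource.get("url")

    class ExampleYumSource(object):
        """ Hypothetical stand-in for a yum-style Source subclass. """
        def __init__(self, xsource):
            self.url = xsource.get("url")

    # hypothetical registry mapping the Source tag's 'type' attribute
    # to a handler class
    SOURCE_TYPES = {"apt": ExampleAptSource, "yum": ExampleYumSource}

    def source_from_xml_sketch(xsource):
        """ Instantiate the class registered for this tag's type. """
        stype = xsource.get("type")
        if stype not in SOURCE_TYPES:
            raise KeyError("Unknown source type %r" % stype)
        return SOURCE_TYPES[stype](xsource)

    src = source_from_xml_sketch(lxml.etree.fromstring(
        '<Source type="yum" url="http://example.com/repo"/>'))
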
diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py index 5cf90e188..3319dda78 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Source.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Source.py @@ -52,8 +52,8 @@ import sys import Bcfg2.Server.Plugin from Bcfg2.Options import get_option_parser from Bcfg2.Compat import HTTPError, HTTPBasicAuthHandler, \ - HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, \ - urlopen, cPickle, md5 + HTTPPasswordMgrWithDefaultRealm, install_opener, build_opener, urlopen, \ + cPickle, md5 from Bcfg2.Server.Statistics import track_statistics @@ -66,7 +66,7 @@ def fetch_url(url): :raises: URLError - Failure fetching URL :returns: string - the content of the page at the given URL """ if '@' in url: - mobj = re.match('(\w+://)([^:]+):([^@]+)@(.*)$', url) + mobj = re.match(r'(\w+://)([^:]+):([^@]+)@(.*)$', url) if not mobj: raise ValueError("Invalid URL") user = mobj.group(2) @@ -308,7 +308,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902 :raises: OSError - If the saved data cannot be read :raises: cPickle.UnpicklingError - If the saved data is corrupt """ - data = open(self.cachefile) + data = open(self.cachefile, 'rb') (self.pkgnames, self.deps, self.provides, self.essentialpkgs) = cPickle.load(data) @@ -608,7 +608,7 @@ class Source(Bcfg2.Server.Plugin.Debuggable): # pylint: disable=R0902 self.logger.info("Packages: Updating %s" % url) fname = self.escape_url(url) try: - open(fname, 'w').write(fetch_url(url)) + open(fname, 'wb').write(fetch_url(url)) except ValueError: self.logger.error("Packages: Bad url string %s" % url) raise diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py index 4535fb76d..ab96d3f59 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/Yum.py @@ -68,7 +68,7 @@ from Bcfg2.Compat import StringIO, cPickle, HTTPError, URLError, \ # pylint: enable=W0622 from Bcfg2.Server.Plugins.Packages.Collection import Collection from Bcfg2.Server.Plugins.Packages.Source import SourceInitError, Source, \ - fetch_url + fetch_url from Bcfg2.Server.Statistics import track_statistics LOGGER = logging.getLogger(__name__) @@ -279,7 +279,7 @@ class YumCollection(Collection): #: Define a unique cache file for this collection to use #: for cached yum metadata self.cachefile = os.path.join(self.cachepath, - "cache-%s" % self.cachekey) + "cache-%s" % self.cachekey) if not os.path.exists(self.cachefile): os.mkdir(self.cachefile) @@ -420,7 +420,7 @@ class YumCollection(Collection): config.add_section(reponame) added = True except ConfigParser.DuplicateSectionError: - match = re.search("-(\d+)", reponame) + match = re.search(r'-(\d+)', reponame) if match: rid = int(match.group(1)) + 1 else: @@ -1153,7 +1153,7 @@ class YumSource(Source): if entry.get('name').startswith('/'): self.needed_paths.add(entry.get('name')) pro = pdata.find(RP + 'provides') - if pro != None: + if pro is not None: for entry in pro.getchildren(): prov = entry.get('name') if prov not in self.provides[arch]: @@ -1169,9 +1169,9 @@ class YumSource(Source): try: groupid = group.xpath('id')[0].text self.yumgroups[groupid] = {'mandatory': list(), - 'default': list(), - 'optional': list(), - 'conditional': list()} + 'default': list(), + 'optional': list(), + 'conditional': list()} except IndexError: continue try: diff --git a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py 
b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py index 2175cf0aa..052c362ab 100644 --- a/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py +++ b/src/lib/Bcfg2/Server/Plugins/Packages/__init__.py @@ -5,7 +5,6 @@ determine the completeness of the client configuration. """ import os import sys import glob -import copy import shutil import lxml.etree import Bcfg2.Logger @@ -20,7 +19,8 @@ from Bcfg2.Server.Statistics import track_statistics YUM_CONFIG_DEFAULT = "/etc/yum.repos.d/bcfg2.repo" #: The default path for generated apt configs -APT_CONFIG_DEFAULT = "/etc/apt/sources.d/bcfg2" +APT_CONFIG_DEFAULT = \ + "/etc/apt/sources.list.d/bcfg2-packages-generated-sources.list" class Packages(Bcfg2.Server.Plugin.Plugin, @@ -177,6 +177,14 @@ class Packages(Bcfg2.Server.Plugin.Plugin, for (key, value) in list(attrib.items()): entry.attrib.__setitem__(key, value) + def get_config(self, metadata): + """ Get yum/apt config, as a string, for the specified client. + + :param metadata: The client to create the config for. + :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata + """ + return self.get_collection(metadata).get_config() + def HandleEntry(self, entry, metadata): """ Bind configuration entries. ``HandleEntry`` handles entries two different ways: @@ -226,14 +234,14 @@ class Packages(Bcfg2.Server.Plugin.Plugin, return True elif entry.tag == 'Path': # managed entries for yum/apt configs - if (entry.get("name") == \ - self.core.setup.cfp.get("packages", - "yum_config", - default=YUM_CONFIG_DEFAULT) or - entry.get("name") == \ - self.core.setup.cfp.get("packages", - "apt_config", - default=APT_CONFIG_DEFAULT)): + if (entry.get("name") == + self.core.setup.cfp.get("packages", + "yum_config", + default=YUM_CONFIG_DEFAULT) or + entry.get("name") == + self.core.setup.cfp.get("packages", + "apt_config", + default=APT_CONFIG_DEFAULT)): return True return False @@ -523,7 +531,7 @@ class Packages(Bcfg2.Server.Plugin.Plugin, """ collection = self.get_collection(metadata) return dict(sources=collection.get_additional_data(), - allsources=copy.deepcopy(self.sources)) + get_config=self.get_config) def end_client_run(self, metadata): """ Hook to clear the cache for this client in diff --git a/src/lib/Bcfg2/Server/Plugins/Probes.py b/src/lib/Bcfg2/Server/Plugins/Probes.py index 2ea5088de..7e4935d74 100644 --- a/src/lib/Bcfg2/Server/Plugins/Probes.py +++ b/src/lib/Bcfg2/Server/Plugins/Probes.py @@ -60,12 +60,12 @@ class ClientProbeDataSet(dict): dict.__init__(self, *args, **kwargs) -class ProbeData(str): +class ProbeData(str): # pylint: disable=E0012,R0924 """ a ProbeData object emulates a str object, but also has .xdata, .json, and .yaml properties to provide convenient ways to use ProbeData objects as XML, JSON, or YAML data """ def __new__(cls, data): - return str.__new__(cls, data) + return str.__new__(cls, data.encode('utf-8')) def __init__(self, data): # pylint: disable=W0613 str.__init__(self) @@ -113,15 +113,15 @@ class ProbeData(str): class ProbeSet(Bcfg2.Server.Plugin.EntrySet): """ Handle universal and group- and host-specific probe files """ - ignore = re.compile("^(\.#.*|.*~|\\..*\\.(tmp|sw[px])|probed\\.xml)$") + ignore = re.compile(r'^(\.#.*|.*~|\..*\.(tmp|sw[px])|probed\.xml)$') probename = \ - re.compile("(.*/)?(?P<basename>\S+?)(\.(?P<mode>(?:G\d\d)|H)_\S+)?$") - bangline = re.compile('^#!\s*(?P<interpreter>.*)$') + re.compile(r'(.*/)?(?P<basename>\S+?)(\.(?P<mode>(?:G\d\d)|H)_\S+)?$') + bangline = re.compile(r'^#!\s*(?P<interpreter>.*)$') basename_is_regex = True def __init__(self, path, 
encoding, plugin_name): self.plugin_name = plugin_name - Bcfg2.Server.Plugin.EntrySet.__init__(self, '[0-9A-Za-z_\-]+', path, + Bcfg2.Server.Plugin.EntrySet.__init__(self, r'[0-9A-Za-z_\-]+', path, Bcfg2.Server.Plugin.SpecificData, encoding) Bcfg2.Server.FileMonitor.get_fam().AddMonitor(path, self) @@ -155,7 +155,20 @@ class ProbeSet(Bcfg2.Server.Plugin.EntrySet): probe = lxml.etree.Element('probe') probe.set('name', os.path.basename(name)) probe.set('source', self.plugin_name) - probe.text = entry.data + if (metadata.version_info and + metadata.version_info > (1, 3, 1, '', 0)): + try: + probe.text = entry.data.decode('utf-8') + except AttributeError: + probe.text = entry.data + else: + try: + probe.text = entry.data + except: # pylint: disable=W0702 + self.logger.error("Client unable to handle unicode " + "probes. Skipping %s" % + probe.get('name')) + continue match = self.bangline.match(entry.data.split('\n')[0]) if match: probe.set('interpreter', match.group('interpreter')) @@ -211,15 +224,21 @@ class Probes(Bcfg2.Server.Plugin.Probing, lxml.etree.SubElement(top, 'Client', name=client, timestamp=str(int(probedata.timestamp))) for probe in sorted(probedata): - lxml.etree.SubElement(ctag, 'Probe', name=probe, - value=str(self.probedata[client][probe])) + try: + lxml.etree.SubElement( + ctag, 'Probe', name=probe, + value=str( + self.probedata[client][probe]).decode('utf-8')) + except AttributeError: + lxml.etree.SubElement( + ctag, 'Probe', name=probe, + value=str(self.probedata[client][probe])) for group in sorted(self.cgroups[client]): lxml.etree.SubElement(ctag, "Group", name=group) try: - datafile = open(os.path.join(self.data, 'probed.xml'), 'w') - datafile.write(lxml.etree.tostring( - top, xml_declaration=False, - pretty_print='true').decode('UTF-8')) + top.getroottree().write(os.path.join(self.data, 'probed.xml'), + xml_declaration=False, + pretty_print='true') except IOError: err = sys.exc_info()[1] self.logger.error("Failed to write probed.xml: %s" % err) @@ -234,9 +253,10 @@ class Probes(Bcfg2.Server.Plugin.Probing, if pdata.data != data: pdata.data = data pdata.save() + ProbesDataModel.objects.filter( hostname=client.hostname).exclude( - probe__in=self.probedata[client.hostname]).delete() + probe__in=self.probedata[client.hostname]).delete() for group in self.cgroups[client.hostname]: try: @@ -248,7 +268,7 @@ class Probes(Bcfg2.Server.Plugin.Probing, grp.save() ProbesGroupsModel.objects.filter( hostname=client.hostname).exclude( - group__in=self.cgroups[client.hostname]).delete() + group__in=self.cgroups[client.hostname]).delete() def load_data(self): """ Load probe data from the appropriate backend (probed.xml @@ -322,7 +342,7 @@ class Probes(Bcfg2.Server.Plugin.Probing, def ReceiveDataItem(self, client, data, cgroups, cprobedata): """Receive probe results pertaining to client.""" - if data.text == None: + if data.text is None: self.logger.info("Got null response to probe %s from %s" % (data.get('name'), client.hostname)) cprobedata[data.get('name')] = ProbeData('') diff --git a/src/lib/Bcfg2/Server/Plugins/Properties.py b/src/lib/Bcfg2/Server/Plugins/Properties.py index 762f9f8f0..f091acf01 100644 --- a/src/lib/Bcfg2/Server/Plugins/Properties.py +++ b/src/lib/Bcfg2/Server/Plugins/Properties.py @@ -216,8 +216,13 @@ class XMLPropertyFile(Bcfg2.Server.Plugin.StructFile, PropertyFile): return repr(self.xdata) -class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): - """ A collection of properties files. 
""" +class Properties(Bcfg2.Server.Plugin.Plugin, + Bcfg2.Server.Plugin.Connector, + Bcfg2.Server.Plugin.DirectoryBacked): + """ The properties plugin maps property files into client metadata + instances. """ + + #: Extensions that are understood by Properties. extensions = ["xml"] if HAS_JSON: extensions.append("json") @@ -234,14 +239,17 @@ class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): #: Ignore XML schema (``.xsd``) files ignore = re.compile(r'.*\.xsd$') - def __init__(self, data): - Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, data) + def __init__(self, core, datastore): + global SETUP # pylint: disable=W0603 + Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) + Bcfg2.Server.Plugin.Connector.__init__(self) + Bcfg2.Server.Plugin.DirectoryBacked.__init__(self, self.data) #: Instead of creating children of this object with a static #: object, we use :func:`property_dispatcher` to create a #: child of the appropriate subclass of :class:`PropertyFile` self.__child__ = self.property_dispatcher - __init__.__doc__ = Bcfg2.Server.Plugin.DirectoryBacked.__init__.__doc__ + __init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__ def property_dispatcher(self, fname): """ Dispatch an event on a Properties file to the @@ -262,28 +270,9 @@ class PropDirectoryBacked(Bcfg2.Server.Plugin.DirectoryBacked): raise Bcfg2.Server.Plugin.PluginExecutionError( "Properties: Unknown extension %s" % fname) - -class Properties(Bcfg2.Server.Plugin.Plugin, - Bcfg2.Server.Plugin.Connector): - """ The properties plugin maps property files into client metadata - instances. """ - - def __init__(self, core, datastore): - Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore) - Bcfg2.Server.Plugin.Connector.__init__(self) - try: - self.store = PropDirectoryBacked(self.data) - except OSError: - err = sys.exc_info()[1] - self.logger.error("Error while creating Properties store: %s" % - err) - raise Bcfg2.Server.Plugin.PluginInitError - - __init__.__doc__ = Bcfg2.Server.Plugin.Plugin.__init__.__doc__ - def get_additional_data(self, metadata): rv = dict() - for fname, pfile in self.store.entries.items(): + for fname, pfile in self.entries.items(): rv[fname] = pfile.get_additional_data(metadata) return rv get_additional_data.__doc__ = \ diff --git a/src/lib/Bcfg2/Server/Plugins/Reporting.py b/src/lib/Bcfg2/Server/Plugins/Reporting.py index a6dc2c1ef..3354763d4 100644 --- a/src/lib/Bcfg2/Server/Plugins/Reporting.py +++ b/src/lib/Bcfg2/Server/Plugins/Reporting.py @@ -92,10 +92,11 @@ class Reporting(Statistics, Threaded, PullSource, Debuggable): # try 3 times to store the data for i in [1, 2, 3]: try: - self.transport.store(client.hostname, cdata, - lxml.etree.tostring( + self.transport.store( + client.hostname, cdata, + lxml.etree.tostring( stats, - xml_declaration=False).decode('UTF-8')) + xml_declaration=False)) self.debug_log("%s: Queued statistics data for %s" % (self.__class__.__name__, client.hostname)) return diff --git a/src/lib/Bcfg2/Server/Plugins/SSHbase.py b/src/lib/Bcfg2/Server/Plugins/SSHbase.py index fb5bd50bf..1264fd1cf 100644 --- a/src/lib/Bcfg2/Server/Plugins/SSHbase.py +++ b/src/lib/Bcfg2/Server/Plugins/SSHbase.py @@ -202,10 +202,11 @@ class SSHbase(Bcfg2.Server.Plugin.Plugin, if specific.hostname and specific.hostname in names: hostnames = names[specific.hostname] elif specific.group: - hostnames = list(chain( + hostnames = list( + chain( *[names[cmeta.hostname] - for cmeta in \ - mquery.by_groups([specific.group])])) + for cmeta in + 
mquery.by_groups([specific.group])])) elif specific.all: # a generic key for all hosts? really? hostnames = list(chain(*list(names.values()))) diff --git a/src/lib/Bcfg2/Server/Plugins/SSLCA.py b/src/lib/Bcfg2/Server/Plugins/SSLCA.py index d52d9325c..b21732666 100644 --- a/src/lib/Bcfg2/Server/Plugins/SSLCA.py +++ b/src/lib/Bcfg2/Server/Plugins/SSLCA.py @@ -69,7 +69,7 @@ class SSLCACertSpec(SSLCAXMLSpec): def get_spec(self, metadata): rv = SSLCAXMLSpec.get_spec(self, metadata) rv['subjectaltname'] = [e.text for e in self.Match(metadata) - if e.tag == "SubjectAltName"] + if e.tag == "subjectAltName"] return rv diff --git a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py index 0aea439f9..c3a2221f6 100644 --- a/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py +++ b/src/lib/Bcfg2/Server/Plugins/ServiceCompat.py @@ -14,7 +14,7 @@ class ServiceCompat(Bcfg2.Server.Plugin.Plugin, def validate_goals(self, metadata, config): """ Apply defaults """ - if metadata.version_info and metadata.version_info > (1, 3, 0, '', 0): + if metadata.version_info and metadata.version_info >= (1, 3, 0, '', 0): # do not care about a client that is _any_ 1.3.0 release # (including prereleases and RCs) return diff --git a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py index ad3eb65bc..e834759c2 100644 --- a/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py +++ b/src/lib/Bcfg2/Server/Plugins/TemplateHelper.py @@ -81,7 +81,7 @@ class TemplateHelper(Bcfg2.Server.Plugin.Plugin, Bcfg2.Server.Plugin.DirectoryBacked): """ A plugin to provide helper classes and functions to templates """ __author__ = 'chris.a.st.pierre@gmail.com' - ignore = re.compile("^(\.#.*|.*~|\\..*\\.(sw[px])|.*\.py[co])$") + ignore = re.compile(r'^(\.#.*|.*~|\..*\.(sw[px])|.*\.py[co])$') patterns = MODULE_RE __child__ = HelperModule diff --git a/src/lib/Bcfg2/Server/Plugins/__init__.py b/src/lib/Bcfg2/Server/Plugins/__init__.py index 1f85702f0..ad51cf368 100644 --- a/src/lib/Bcfg2/Server/Plugins/__init__.py +++ b/src/lib/Bcfg2/Server/Plugins/__init__.py @@ -1 +1,5 @@ -""" Bcfg2 Plugins """ +"""Imports for Bcfg2.Server.Plugins.""" + +from Bcfg2.Compat import walk_packages + +__all__ = [m[1] for m in walk_packages(path=__path__)] diff --git a/src/lib/Bcfg2/Server/SSLServer.py b/src/lib/Bcfg2/Server/SSLServer.py index 28450aa1a..13c756049 100644 --- a/src/lib/Bcfg2/Server/SSLServer.py +++ b/src/lib/Bcfg2/Server/SSLServer.py @@ -51,10 +51,11 @@ class XMLRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher): allow_none=self.allow_none, encoding=self.encoding) except: + err = sys.exc_info() self.logger.error("Unexpected handler error", exc_info=1) # report exception back to server raw_response = xmlrpclib.dumps( - xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), + xmlrpclib.Fault(1, "%s:%s" % (err[0].__name__, err[1])), allow_none=self.allow_none, encoding=self.encoding) return raw_response @@ -199,8 +200,10 @@ class XMLRPCRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): try: username, password = auth_content.split(":") except TypeError: + # pylint: disable=E0602 username, pw = auth_content.split(bytes(":", encoding='utf-8')) password = pw.decode('utf-8') + # pylint: enable=E0602 except ValueError: username = auth_content password = "" diff --git a/src/lib/Bcfg2/Server/Statistics.py b/src/lib/Bcfg2/Server/Statistics.py index 8a6ff54c4..dfb698b61 100644 --- a/src/lib/Bcfg2/Server/Statistics.py +++ b/src/lib/Bcfg2/Server/Statistics.py @@ -30,10 
+30,10 @@ class Statistic(object): :param value: The value to add to this statistic :type value: int or float """ - self.min = min(self.min, value) - self.max = max(self.max, value) - self.ave = (((self.ave * (self.count - 1)) + value) / self.count) + self.min = min(self.min, float(value)) + self.max = max(self.max, float(value)) self.count += 1 + self.ave = (((self.ave * (self.count - 1)) + value) / self.count) def get_value(self): """ Get a tuple of all the stats tracked on this named item. @@ -48,6 +48,11 @@ class Statistic(object): """ return (self.name, (self.min, self.max, self.ave, self.count)) + def __repr__(self): + return "%s(%s, (min=%s, avg=%s, max=%s, count=%s))" % ( + self.__class__.__name__, + self.name, self.min, self.ave, self.max, self.count) + class Statistics(object): """ A collection of named :class:`Statistic` objects. """ diff --git a/src/lib/Bcfg2/Server/__init__.py b/src/lib/Bcfg2/Server/__init__.py index bcf3b4dea..6bd0ff9eb 100644 --- a/src/lib/Bcfg2/Server/__init__.py +++ b/src/lib/Bcfg2/Server/__init__.py @@ -1,9 +1,14 @@ """This is the set of modules for Bcfg2.Server.""" import lxml.etree +from Bcfg2.Compat import walk_packages +<<<<<<< HEAD __all__ = ["Admin", "Core", "FileMonitor", "Plugin", "Plugins", "Reports", "XMLParser", "XI", "XI_NAMESPACE"] +======= +__all__ = [m[1] for m in walk_packages(path=__path__)] +>>>>>>> maint XI = 'http://www.w3.org/2001/XInclude' XI_NAMESPACE = '{%s}' % XI diff --git a/src/lib/Bcfg2/Utils.py b/src/lib/Bcfg2/Utils.py index dd76f04d3..ef88a1a36 100644 --- a/src/lib/Bcfg2/Utils.py +++ b/src/lib/Bcfg2/Utils.py @@ -22,7 +22,7 @@ class ClassName(object): return owner.__name__ -class PackedDigitRange(object): +class PackedDigitRange(object): # pylint: disable=E0012,R0924 """ Representation of a set of integer ranges. A range is described by a comma-delimited string of integers and ranges, e.g.:: @@ -108,10 +108,16 @@ class ExecutorResult(object): def __init__(self, stdout, stderr, retval): #: The output of the command - self.stdout = stdout + if isinstance(stdout, str): + self.stdout = stdout + else: + self.stdout = stdout.decode('utf-8') #: The error produced by the command - self.stderr = stderr + if isinstance(stdout, str): + self.stderr = stderr + else: + self.stderr = stderr.decode('utf-8') #: The return value of the command. self.retval = retval @@ -145,6 +151,19 @@ class ExecutorResult(object): returned a tuple of (return value, stdout split by lines). """ return (self.retval, self.stdout.splitlines())[idx] + def __len__(self): + """ This provides compatibility with the old Executor, which + returned a tuple of (return value, stdout split by lines). """ + return 2 + + def __delitem__(self, _): + raise TypeError("'%s' object doesn't support item deletion" % + self.__class__.__name__) + + def __setitem__(self, idx, val): + raise TypeError("'%s' object does not support item assignment" % + self.__class__.__name__) + def __nonzero__(self): return self.__bool__() @@ -172,7 +191,7 @@ class Executor(object): :param proc: The process to kill upon timeout. 
:type proc: subprocess.Popen :returns: None """ - if proc.poll() == None: + if proc.poll() is None: try: proc.kill() self.logger.warning("Process exceeeded timeout, killing") @@ -216,6 +235,13 @@ class Executor(object): for line in inputdata.splitlines(): self.logger.debug('> %s' % line) (stdout, stderr) = proc.communicate(input=inputdata) + + # py3k fixes + if not isinstance(stdout, str): + stdout = stdout.decode('utf-8') + if not isinstance(stderr, str): + stderr = stderr.decode('utf-8') + for line in stdout.splitlines(): # pylint: disable=E1103 self.logger.debug('< %s' % line) for line in stderr.splitlines(): # pylint: disable=E1103 diff --git a/src/lib/Bcfg2/__init__.py b/src/lib/Bcfg2/__init__.py index 3fe2a0d75..74a871f2a 100644 --- a/src/lib/Bcfg2/__init__.py +++ b/src/lib/Bcfg2/__init__.py @@ -1,3 +1 @@ """Base modules definition.""" - -__all__ = ['Server', 'Client', 'Logger', 'Options', 'Proxy', 'Statistics'] diff --git a/src/lib/Bcfg2/settings.py b/src/lib/Bcfg2/settings.py index 87f2a0df0..f3697d66f 100644 --- a/src/lib/Bcfg2/settings.py +++ b/src/lib/Bcfg2/settings.py @@ -54,11 +54,11 @@ DEFAULT_CONFIG = _default_config() def read_config(cfile=DEFAULT_CONFIG, repo=None): """ read the config file and set django settings based on it """ - # pylint: disable=W0603 + # pylint: disable=W0602,W0603 global DATABASE_ENGINE, DATABASE_NAME, DATABASE_USER, DATABASE_PASSWORD, \ DATABASE_HOST, DATABASE_PORT, DEBUG, TEMPLATE_DEBUG, TIME_ZONE, \ MEDIA_URL - # pylint: enable=W0603 + # pylint: enable=W0602,W0603 if not os.path.exists(cfile) and os.path.exists(DEFAULT_CONFIG): print("%s does not exist, using %s for database configuration" % diff --git a/src/lib/Bcfg2/version.py b/src/lib/Bcfg2/version.py index 6f3ba3e49..12fc584fe 100644 --- a/src/lib/Bcfg2/version.py +++ b/src/lib/Bcfg2/version.py @@ -5,7 +5,7 @@ import re __version__ = "1.3.1" -class Bcfg2VersionInfo(tuple): +class Bcfg2VersionInfo(tuple): # pylint: disable=E0012,R0924 """ object to make granular version operations (particularly comparisons) easier """ diff --git a/src/sbin/bcfg2-crypt b/src/sbin/bcfg2-crypt index f7deba90c..c4d5f0747 100755 --- a/src/sbin/bcfg2-crypt +++ b/src/sbin/bcfg2-crypt @@ -337,7 +337,7 @@ class PropertiesEncryptor(Encryptor): # actually need to unchunk anything xdata = data[0] # find root element - while xdata.getparent() != None: + while xdata.getparent() is not None: xdata = xdata.getparent() return lxml.etree.tostring(xdata, xml_declaration=False, diff --git a/src/sbin/bcfg2-info b/src/sbin/bcfg2-info index ad35bbeeb..101530cac 100755 --- a/src/sbin/bcfg2-info +++ b/src/sbin/bcfg2-info @@ -120,7 +120,6 @@ class InfoCore(cmd.Cmd, Bcfg2.Server.Core.BaseCore): Bcfg2.Server.Core.BaseCore.__init__(self) self.prompt = '> ' self.cont = True - self.fam.handle_events_in_interval(4) def _get_client_list(self, hostglobs): """ given a host glob, get a list of clients that match it """ @@ -379,7 +378,7 @@ Bcfg2 client itself.""") xml_declaration=False).decode('UTF-8') except Exception: print("Failed to build entry %s for host %s: %s" % - (fname, client, traceback.format_exc().splitlines()[-1])) + (fname, client, traceback.format_exc().splitlines()[-1])) raise try: if outfile: @@ -457,9 +456,7 @@ Bcfg2 client itself.""") def do_clients(self, _): """ clients - Print out client/profile info """ data = [('Client', 'Profile')] - clist = self.metadata.clients - clist.sort() - for client in clist: + for client in sorted(self.metadata.list_clients()): imd = self.metadata.get_initial_metadata(client) data.append((client, 
imd.profile)) print_tabular(data) @@ -467,19 +464,18 @@ Bcfg2 client itself.""") def do_config(self, _): """ config - Print out the current configuration of Bcfg2""" output = [ - ('Description', 'Value'), - ('Path Bcfg2 repository', self.setup['repo']), - ('Plugins', self.setup['plugins']), - ('Password', self.setup['password']), - ('Server Metadata Connector', self.setup['mconnect']), - ('Filemonitor', self.setup['filemonitor']), - ('Server address', self.setup['location']), - ('Path to key', self.setup['key']), - ('Path to SSL certificate', self.setup['cert']), - ('Path to SSL CA certificate', self.setup['ca']), - ('Protocol', self.setup['protocol']), - ('Logging', self.setup['logging']) - ] + ('Description', 'Value'), + ('Path Bcfg2 repository', self.setup['repo']), + ('Plugins', self.setup['plugins']), + ('Password', self.setup['password']), + ('Server Metadata Connector', self.setup['mconnect']), + ('Filemonitor', self.setup['filemonitor']), + ('Server address', self.setup['location']), + ('Path to key', self.setup['key']), + ('Path to SSL certificate', self.setup['cert']), + ('Path to SSL CA certificate', self.setup['ca']), + ('Protocol', self.setup['protocol']), + ('Logging', self.setup['logging'])] print_tabular(output) def do_probes(self, args): @@ -606,7 +602,7 @@ Bcfg2 client itself.""") # Dump all mappings unless type specified data = [('Plugin', 'Type', 'Name')] arglen = len(args.split()) - for generator in self.generators: + for generator in self.plugins_by_type(Bcfg2.Server.Plugin.Generator): if arglen == 0: etypes = list(generator.Entries.keys()) else: @@ -712,6 +708,8 @@ Bcfg2 client itself.""") def run(self, args): # pylint: disable=W0221 try: + self.load_plugins() + self.fam.handle_events_in_interval(1) if args: self.onecmd(" ".join(args)) else: @@ -738,7 +736,7 @@ def build_usage(): # shim for python 2.4, __func__ is im_func funcattr = getattr(attr, "__func__", getattr(attr, "im_func", None)) - if (funcattr != None and + if (funcattr is not None and funcattr.func_name not in cmd_blacklist and funcattr.func_name.startswith("do_") and funcattr.func_doc): diff --git a/src/sbin/bcfg2-report-collector b/src/sbin/bcfg2-report-collector index 403775251..ae6d3b167 100755 --- a/src/sbin/bcfg2-report-collector +++ b/src/sbin/bcfg2-report-collector @@ -12,12 +12,10 @@ from Bcfg2.Reporting.Collector import ReportingCollector, ReportingError def main(): logger = logging.getLogger('bcfg2-report-collector') - optinfo = dict( - daemon=Bcfg2.Options.DAEMON, - repo=Bcfg2.Options.SERVER_REPOSITORY, - filemonitor=Bcfg2.Options.SERVER_FILEMONITOR, - web_configfile=Bcfg2.Options.WEB_CFILE, - ) + optinfo = dict(daemon=Bcfg2.Options.DAEMON, + repo=Bcfg2.Options.SERVER_REPOSITORY, + filemonitor=Bcfg2.Options.SERVER_FILEMONITOR, + web_configfile=Bcfg2.Options.WEB_CFILE) optinfo.update(Bcfg2.Options.CLI_COMMON_OPTIONS) optinfo.update(Bcfg2.Options.REPORTING_COMMON_OPTIONS) setup = Bcfg2.Options.load_option_parser(optinfo) diff --git a/src/sbin/bcfg2-reports b/src/sbin/bcfg2-reports index 2c4a918be..bb45e0009 100755 --- a/src/sbin/bcfg2-reports +++ b/src/sbin/bcfg2-reports @@ -233,7 +233,8 @@ def main(): try: entries = [l.strip().split(":") for l in open(options.file)] - except IOError, err: + except IOError: + err = sys.exc_info()[1] print("Cannot read entries from %s: %s" % (options.file, err)) return 2 diff --git a/src/sbin/bcfg2-yum-helper b/src/sbin/bcfg2-yum-helper index 7e5c03fd5..7dbdad16b 100755 --- a/src/sbin/bcfg2-yum-helper +++ b/src/sbin/bcfg2-yum-helper @@ -9,33 +9,13 @@ import os 
import sys import yum import logging +import Bcfg2.Logger from optparse import OptionParser try: import json except ImportError: import simplejson as json -LOGGER = None - - -def get_logger(verbose=0): - """ set up logging according to the verbose level given on the - command line """ - global LOGGER - if LOGGER is None: - LOGGER = logging.getLogger(sys.argv[0]) - stderr = logging.StreamHandler() - if verbose: - level = logging.DEBUG - else: - level = logging.WARNING - LOGGER.setLevel(level) - LOGGER.addHandler(stderr) - syslog = logging.handlers.SysLogHandler("/dev/log") - syslog.setFormatter(logging.Formatter("%(name)s: %(message)s")) - LOGGER.addHandler(syslog) - return LOGGER - def pkg_to_tuple(package): """ json doesn't distinguish between tuples and lists, but yum @@ -76,7 +56,7 @@ class DepSolver(object): except AttributeError: self.yumbase._getConfig(cfgfile, debuglevel=verbose) # pylint: enable=E1121,W0212 - self.logger = get_logger(verbose) + self.logger = logging.getLogger(self.__class__.__name__) self._groups = None def get_groups(self): @@ -220,7 +200,17 @@ def main(): parser.add_option("-v", "--verbose", help="Verbosity level", action="count") (options, args) = parser.parse_args() - logger = get_logger(options.verbose) + + if options.verbose: + level = logging.DEBUG + clevel = logging.DEBUG + else: + level = logging.WARNING + clevel = logging.INFO + Bcfg2.Logger.setup_logging('bcfg2-yum-helper', to_syslog=True, + to_console=clevel, level=level) + logger = logging.getLogger('bcfg2-yum-helper') + try: cmd = args[0] except IndexError: diff --git a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py index 662e0e1b6..8f933e08f 100644 --- a/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py +++ b/testsuite/Testsrc/Testlib/TestClient/TestTools/TestPOSIX/TestFile.py @@ -64,10 +64,18 @@ class TestPOSIXFile(TestPOSIXTool): self.assertEqual(ptool._get_data(entry), ("test", True)) entry = copy.deepcopy(orig_entry) + entry.set("encoding", "base64") + entry.set("empty", "true") + self.assertEqual(ptool._get_data(entry), ("", True)) + + entry = copy.deepcopy(orig_entry) entry.set("empty", "true") self.assertEqual(ptool._get_data(entry), ("", False)) entry = copy.deepcopy(orig_entry) + self.assertEqual(ptool._get_data(entry), ("", False)) + + entry = copy.deepcopy(orig_entry) entry.text = "test" self.assertEqual(ptool._get_data(entry), ("test", False)) diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py index a1e624824..318f5ceaa 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testbase.py @@ -72,14 +72,32 @@ class TestPlugin(TestDebuggable): if core is None: core = Mock() core.setup = MagicMock() - return self.test_obj(core, datastore) + @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) + def inner(): + return self.test_obj(core, datastore) + return inner() - def test__init(self): + @patch("os.makedirs") + @patch("os.path.exists") + def test__init(self, mock_exists, mock_makedirs): core = Mock() core.setup = MagicMock() + + mock_exists.return_value = True + p = self.get_obj(core=core) + self.assertEqual(p.data, os.path.join(datastore, p.name)) + self.assertEqual(p.core, core) + mock_exists.assert_any_call(p.data) + self.assertFalse(mock_makedirs.called) + + mock_exists.reset_mock() + mock_makedirs.reset_mock() + 
mock_exists.return_value = False p = self.get_obj(core=core) self.assertEqual(p.data, os.path.join(datastore, p.name)) self.assertEqual(p.core, core) + mock_exists.assert_any_call(p.data) + mock_makedirs.assert_any_call(p.data) @patch("os.makedirs") def test_init_repo(self, mock_makedirs): diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py index 6187880b7..929f665b1 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testhelpers.py @@ -133,7 +133,11 @@ class TestDirectoryBacked(Bcfg2TestCase): """ ensure that the child object has the correct interface """ self.assertTrue(hasattr(self.test_obj.__child__, "HandleEvent")) - def get_obj(self): + @patch("os.makedirs", Mock()) + def get_obj(self, fam=None): + if fam is None: + fam = Mock() + @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__), Mock()) @@ -142,12 +146,26 @@ class TestDirectoryBacked(Bcfg2TestCase): self.test_obj.__name__)) return inner() - def test__init(self): + @patch("os.makedirs") + @patch("os.path.exists") + def test__init(self, mock_exists, mock_makedirs): @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, self.test_obj.__name__)) def inner(mock_add_monitor): db = self.test_obj(datastore) + mock_exists.return_value = True + mock_add_monitor.assert_called_with('') + mock_exists.assert_called_with(db.data) + self.assertFalse(mock_makedirs.called) + + mock_add_monitor.reset_mock() + mock_exists.reset_mock() + mock_makedirs.reset_mock() + mock_exists.return_value = False + db = self.test_obj(datastore) mock_add_monitor.assert_called_with('') + mock_exists.assert_called_with(db.data) + mock_makedirs.assert_called_with(db.data) inner() @@ -367,20 +385,24 @@ class TestXMLFileBacked(TestFileBacked): def get_obj(self, path=None, should_monitor=False): if path is None: path = self.path - return self.test_obj(path, should_monitor=should_monitor) + + @patchIf(not isinstance(os.path.exists, Mock), + "os.path.exists", Mock()) + def inner(): + return self.test_obj(path, should_monitor=should_monitor) + return inner() @patch("Bcfg2.Server.FileMonitor.get_fam") def test__init(self, mock_get_fam): xfb = self.get_obj() self.assertEqual(xfb.fam, mock_get_fam.return_value) - if self.should_monitor is not True: - xfb = self.get_obj() - self.assertFalse(xfb.fam.AddMonitor.called) - - if self.should_monitor is not False: + if self.should_monitor: xfb = self.get_obj(should_monitor=True) xfb.fam.AddMonitor.assert_called_with(self.path, xfb) + else: + xfb = self.get_obj() + self.assertFalse(xfb.fam.AddMonitor.called) @patch("glob.glob") @patch("lxml.etree.parse") @@ -571,21 +593,21 @@ class TestXMLFileBacked(TestFileBacked): def test_add_monitor(self): xfb = self.get_obj() xfb.add_monitor("/test/test2.xml") - self.assertIn("/test/test2.xml", xfb.extras) + self.assertIn("/test/test2.xml", xfb.extra_monitors) if self.should_monitor is not True: xfb = self.get_obj() xfb.fam = Mock() xfb.add_monitor("/test/test3.xml") self.assertFalse(xfb.fam.AddMonitor.called) - self.assertIn("/test/test3.xml", xfb.extras) + self.assertIn("/test/test3.xml", xfb.extra_monitors) if self.should_monitor is not False: xfb = self.get_obj(should_monitor=True) xfb.fam = Mock() xfb.add_monitor("/test/test4.xml") xfb.fam.AddMonitor.assert_called_with("/test/test4.xml", xfb) - self.assertIn("/test/test4.xml", xfb.extras) + self.assertIn("/test/test4.xml", 
xfb.extra_monitors) class TestStructFile(TestXMLFileBacked): @@ -1370,13 +1392,18 @@ class TestXMLDirectoryBacked(TestDirectoryBacked): class TestPrioDir(TestPlugin, TestGenerator, TestXMLDirectoryBacked): test_obj = PrioDir - @patch("Bcfg2.Server.Plugin.helpers.%s.add_directory_monitor" % - test_obj.__name__, - Mock()) def get_obj(self, core=None): if core is None: core = Mock() - return self.test_obj(core, datastore) + + @patch("%s.%s.add_directory_monitor" % + (self.test_obj.__module__, self.test_obj.__name__), + Mock()) + @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) + def inner(): + return self.test_obj(core, datastore) + + return inner() def test_HandleEvent(self): TestXMLDirectoryBacked.test_HandleEvent(self) @@ -1622,7 +1649,8 @@ class TestEntrySet(TestDebuggable): bogus))) for ignore in self.ignore: - self.assertTrue(eset.ignore.match(ignore)) + self.assertTrue(eset.ignore.match(ignore), + "%s should be ignored but wasn't" % ignore) self.assertFalse(eset.ignore.match(basename)) self.assertFalse(eset.ignore.match(basename + ".G20_foo")) @@ -1961,6 +1989,7 @@ class TestGroupSpool(TestPlugin, TestGenerator): return inner() def test__init(self): + @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) @patch("%s.%s.AddDirectoryMonitor" % (self.test_obj.__module__, self.test_obj.__name__)) def inner(mock_Add): diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testinterfaces.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testinterfaces.py index 6effe05de..ac0454f84 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testinterfaces.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugin/Testinterfaces.py @@ -97,11 +97,6 @@ class TestProbing(Bcfg2TestCase): class TestStatistics(TestPlugin): test_obj = Statistics - def get_obj(self, core=None): - if core is None: - core = Mock() - return self.test_obj(core, datastore) - def test_process_statistics(self): s = self.get_obj() self.assertRaises(NotImplementedError, @@ -354,12 +349,6 @@ class TestGoalValidator(Bcfg2TestCase): class TestVersion(TestPlugin): test_obj = Version - def get_obj(self, core=None): - if core is None: - core = Mock() - core.setup = MagicMock() - return self.test_obj(core, datastore) - def test_get_revision(self): d = self.get_obj() self.assertRaises(NotImplementedError, d.get_revision) diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgGenshiGenerator.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgGenshiGenerator.py index 154d6a8db..b73670fb7 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgGenshiGenerator.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgGenshiGenerator.py @@ -2,6 +2,7 @@ import os import sys import lxml.etree from mock import Mock, MagicMock, patch +import Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import * from Bcfg2.Server.Plugin import PluginExecutionError diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestGroupPatterns.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestGroupPatterns.py index a9346156c..c6e6f5ef7 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestGroupPatterns.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestGroupPatterns.py @@ -92,7 +92,12 @@ class TestPatternFile(TestXMLFileBacked): core.fam = fam elif not core: core = Mock() - return self.test_obj(path, core=core) + + @patchIf(not isinstance(lxml.etree.Element, Mock), + 
"lxml.etree.Element", Mock()) + def inner(): + return self.test_obj(path, core=core) + return inner() @patch("Bcfg2.Server.Plugins.GroupPatterns.PatternMap") def test_Index(self, mock_PatternMap): @@ -135,6 +140,14 @@ class TestPatternFile(TestXMLFileBacked): class TestGroupPatterns(TestPlugin, TestConnector): test_obj = GroupPatterns + def get_obj(self, core=None): + @patchIf(not isinstance(lxml.etree.Element, Mock), + "lxml.etree.Element", Mock()) + def inner(): + return TestPlugin.get_obj(self, core=core) + return inner() + + def test_get_additional_groups(self): gp = self.get_obj() gp.config = Mock() diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py index 221eb8a3c..a9e9d9701 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestMetadata.py @@ -94,7 +94,13 @@ def get_metadata_object(core=None, watch_clients=False, use_db=False): core.setup = MagicMock() core.metadata_cache = MagicMock() core.setup.cfp.getboolean = Mock(return_value=use_db) - return Metadata(core, datastore, watch_clients=watch_clients) + + @patchIf(not isinstance(os.makedirs, Mock), "os.makedirs", Mock()) + @patchIf(not isinstance(lxml.etree.Element, Mock), + "lxml.etree.Element", Mock()) + def inner(): + return Metadata(core, datastore, watch_clients=watch_clients) + return inner() class TestMetadataDB(DBModelTestCase): @@ -203,7 +209,11 @@ class TestXMLMetadataConfig(TestXMLFileBacked): def get_obj(self, basefile="clients.xml", core=None, watch_clients=False): self.metadata = get_metadata_object(core=core, watch_clients=watch_clients) - return XMLMetadataConfig(self.metadata, watch_clients, basefile) + @patchIf(not isinstance(lxml.etree.Element, Mock), + "lxml.etree.Element", Mock()) + def inner(): + return XMLMetadataConfig(self.metadata, watch_clients, basefile) + return inner() @patch("Bcfg2.Server.FileMonitor.get_fam", Mock()) def test__init(self): @@ -1531,7 +1541,11 @@ class TestMetadata_ClientsXML(TestMetadataBase): metadata = self.get_obj() fam = Bcfg2.Server.FileMonitor._FAM Bcfg2.Server.FileMonitor._FAM = MagicMock() - metadata.clients_xml = metadata._handle_file("clients.xml") + @patchIf(not isinstance(lxml.etree.Element, Mock), + "lxml.etree.Element", Mock()) + def inner(): + metadata.clients_xml = metadata._handle_file("clients.xml") + inner() metadata = TestMetadata.load_clients_data(self, metadata=metadata, xdata=xdata) rv = TestMetadataBase.load_clients_data(self, metadata=metadata, diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py index 958dba4ff..30b08ef2f 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProbes.py @@ -1,7 +1,9 @@ import os import sys +import copy import time import lxml.etree +import Bcfg2.version import Bcfg2.Server import Bcfg2.Server.Plugin from mock import Mock, MagicMock, patch @@ -25,6 +27,47 @@ test_data = dict(a=1, b=[1, 2, 3], c="test", d=dict(a=1, b=dict(a=1), c=(1, "2", 3))) +class FakeElement(lxml.etree._Element): + getroottree = Mock() + + def __init__(self, el): + self._element = el + + def __getattribute__(self, attr): + el = lxml.etree._Element.__getattribute__(self, + '__dict__')['_element'] + if attr == "getroottree": + return FakeElement.getroottree + elif attr == "_element": + return el + else: + return getattr(el, 
attr) + + +class StoringElement(object): + OriginalElement = copy.copy(lxml.etree.Element) + + def __init__(self): + self.element = None + self.return_value = None + + def __call__(self, *args, **kwargs): + self.element = self.OriginalElement(*args, **kwargs) + self.return_value = FakeElement(self.element) + return self.return_value + + +class StoringSubElement(object): + OriginalSubElement = copy.copy(lxml.etree.SubElement) + + def __call__(self, parent, tag, **kwargs): + try: + return self.OriginalSubElement(parent._element, tag, + **kwargs) + except AttributeError: + return self.OriginalSubElement(parent, tag, **kwargs) + + class FakeList(list): pass @@ -173,6 +216,8 @@ group-specific""" ps.get_matching.return_value = matching metadata = Mock() + metadata.version_info = \ + Bcfg2.version.Bcfg2VersionInfo(Bcfg2.version.__version__) pdata = ps.get_probe_data(metadata) ps.get_matching.assert_called_with(metadata) # we can't create a matching operator.attrgetter object, and I @@ -199,9 +244,7 @@ class TestProbes(TestProbing, TestConnector, TestDatabaseBacked): test_obj = Probes def get_obj(self, core=None): - if core is None: - core = MagicMock() - return self.test_obj(core, datastore) + return TestDatabaseBacked.get_obj(self, core=core) def get_test_probedata(self): test_xdata = lxml.etree.Element("test") @@ -245,9 +288,10 @@ text # test__init(), which relies on being able to check the calls # of load_data(), and thus on load_data() being consistently # mocked) - @patch("Bcfg2.Server.Plugins.Probes.Probes.load_data", new=load_data) + @patch("%s.%s.load_data" % (self.test_obj.__module__, + self.test_obj.__name__), new=load_data) def inner(): - return Probes(core, datastore) + return self.get_obj(core) return inner() @@ -284,61 +328,71 @@ text probes._write_data_db.assert_called_with("test") self.assertFalse(probes._write_data_xml.called) - @patch("%s.open" % builtins) - def test__write_data_xml(self, mock_open): + def test__write_data_xml(self): probes = self.get_probes_object(use_db=False) probes.probedata = self.get_test_probedata() probes.cgroups = self.get_test_cgroups() - probes._write_data_xml(None) - - mock_open.assert_called_with(os.path.join(datastore, probes.name, - "probed.xml"), "w") - data = lxml.etree.XML(mock_open.return_value.write.call_args[0][0]) - self.assertEqual(len(data.xpath("//Client")), 2) - - foodata = data.find("Client[@name='foo.example.com']") - self.assertIsNotNone(foodata) - self.assertIsNotNone(foodata.get("timestamp")) - self.assertEqual(len(foodata.findall("Probe")), - len(probes.probedata['foo.example.com'])) - self.assertEqual(len(foodata.findall("Group")), - len(probes.cgroups['foo.example.com'])) - xml = foodata.find("Probe[@name='xml']") - self.assertIsNotNone(xml) - self.assertIsNotNone(xml.get("value")) - xdata = lxml.etree.XML(xml.get("value")) - self.assertIsNotNone(xdata) - self.assertIsNotNone(xdata.find("test")) - self.assertEqual(xdata.find("test").get("foo"), "foo") - text = foodata.find("Probe[@name='text']") - self.assertIsNotNone(text) - self.assertIsNotNone(text.get("value")) - multiline = foodata.find("Probe[@name='multiline']") - self.assertIsNotNone(multiline) - self.assertIsNotNone(multiline.get("value")) - self.assertGreater(len(multiline.get("value").splitlines()), 1) - - bardata = data.find("Client[@name='bar.example.com']") - self.assertIsNotNone(bardata) - self.assertIsNotNone(bardata.get("timestamp")) - self.assertEqual(len(bardata.findall("Probe")), - len(probes.probedata['bar.example.com'])) - 
self.assertEqual(len(bardata.findall("Group")), - len(probes.cgroups['bar.example.com'])) - empty = bardata.find("Probe[@name='empty']") - self.assertIsNotNone(empty) - self.assertIsNotNone(empty.get("value")) - self.assertEqual(empty.get("value"), "") - if HAS_JSON: - jdata = bardata.find("Probe[@name='json']") - self.assertIsNotNone(jdata) - self.assertIsNotNone(jdata.get("value")) - self.assertItemsEqual(test_data, json.loads(jdata.get("value"))) - if HAS_YAML: - ydata = bardata.find("Probe[@name='yaml']") - self.assertIsNotNone(ydata) - self.assertIsNotNone(ydata.get("value")) - self.assertItemsEqual(test_data, yaml.load(ydata.get("value"))) + + @patch("lxml.etree.Element") + @patch("lxml.etree.SubElement", StoringSubElement()) + def inner(mock_Element): + mock_Element.side_effect = StoringElement() + probes._write_data_xml(None) + + top = mock_Element.side_effect.return_value + write = top.getroottree.return_value.write + self.assertEqual(write.call_args[0][0], + os.path.join(datastore, probes.name, + "probed.xml")) + + data = top._element + foodata = data.find("Client[@name='foo.example.com']") + self.assertIsNotNone(foodata) + self.assertIsNotNone(foodata.get("timestamp")) + self.assertEqual(len(foodata.findall("Probe")), + len(probes.probedata['foo.example.com'])) + self.assertEqual(len(foodata.findall("Group")), + len(probes.cgroups['foo.example.com'])) + xml = foodata.find("Probe[@name='xml']") + self.assertIsNotNone(xml) + self.assertIsNotNone(xml.get("value")) + xdata = lxml.etree.XML(xml.get("value")) + self.assertIsNotNone(xdata) + self.assertIsNotNone(xdata.find("test")) + self.assertEqual(xdata.find("test").get("foo"), "foo") + text = foodata.find("Probe[@name='text']") + self.assertIsNotNone(text) + self.assertIsNotNone(text.get("value")) + multiline = foodata.find("Probe[@name='multiline']") + self.assertIsNotNone(multiline) + self.assertIsNotNone(multiline.get("value")) + self.assertGreater(len(multiline.get("value").splitlines()), 1) + + bardata = data.find("Client[@name='bar.example.com']") + self.assertIsNotNone(bardata) + self.assertIsNotNone(bardata.get("timestamp")) + self.assertEqual(len(bardata.findall("Probe")), + len(probes.probedata['bar.example.com'])) + self.assertEqual(len(bardata.findall("Group")), + len(probes.cgroups['bar.example.com'])) + empty = bardata.find("Probe[@name='empty']") + self.assertIsNotNone(empty) + self.assertIsNotNone(empty.get("value")) + self.assertEqual(empty.get("value"), "") + if HAS_JSON: + jdata = bardata.find("Probe[@name='json']") + self.assertIsNotNone(jdata) + self.assertIsNotNone(jdata.get("value")) + self.assertItemsEqual(test_data, + json.loads(jdata.get("value"))) + if HAS_YAML: + ydata = bardata.find("Probe[@name='yaml']") + self.assertIsNotNone(ydata) + self.assertIsNotNone(ydata.get("value")) + self.assertItemsEqual(test_data, + yaml.load(ydata.get("value"))) + + inner() @skipUnless(HAS_DJANGO, "Django not found, skipping") def test__write_data_db(self): @@ -410,18 +464,24 @@ text probes._load_data_db.assert_any_call() self.assertFalse(probes._load_data_xml.called) - @patch("%s.open" % builtins) @patch("lxml.etree.parse") - def test__load_data_xml(self, mock_parse, mock_open): + def test__load_data_xml(self, mock_parse): probes = self.get_probes_object(use_db=False) - # to get the value for lxml.etree.parse to parse, we call - # _write_data_xml, mock the open() call, and grab the data - # that gets "written" to probed.xml probes.probedata = self.get_test_probedata() probes.cgroups = self.get_test_cgroups() - 
probes._write_data_xml(None) - xdata = \ - lxml.etree.XML(str(mock_open.return_value.write.call_args[0][0])) + + # to get the value for lxml.etree.parse to parse, we call + # _write_data_xml, mock the lxml.etree._ElementTree.write() + # call, and grab the data that gets "written" to probed.xml + @patch("lxml.etree.Element") + @patch("lxml.etree.SubElement", StoringSubElement()) + def inner(mock_Element): + mock_Element.side_effect = StoringElement() + probes._write_data_xml(None) + top = mock_Element.side_effect.return_value + return top._element + + xdata = inner() mock_parse.return_value = xdata.getroottree() probes.probedata = dict() probes.cgroups = dict() @@ -559,5 +619,3 @@ text metadata.hostname = "nonexistent" self.assertEqual(probes.get_additional_data(metadata), ClientProbeDataSet()) - - diff --git a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProperties.py b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProperties.py index b63d08524..92dc85fb1 100644 --- a/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProperties.py +++ b/testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestProperties.py @@ -295,8 +295,8 @@ class TestXMLPropertyFile(TestPropertyFile, TestStructFile): self.assertFalse(mock_copy.called) -class TestPropDirectoryBacked(TestDirectoryBacked): - test_obj = PropDirectoryBacked +class TestProperties(TestPlugin, TestConnector, TestDirectoryBacked): + test_obj = Properties testfiles = ['foo.xml', 'bar.baz.xml'] if HAS_JSON: testfiles.extend(["foo.json", "foo.xml.json"]) @@ -305,16 +305,13 @@ class TestPropDirectoryBacked(TestDirectoryBacked): ignore = ['foo.xsd', 'bar.baz.xsd', 'quux.xml.xsd'] badevents = ['bogus.txt'] - -class TestProperties(TestPlugin, TestConnector): - test_obj = Properties - - def test__init(self): - TestPlugin.test__init(self) - - core = Mock() - p = self.get_obj(core=core) - self.assertIsInstance(p.store, PropDirectoryBacked) + def get_obj(self, core=None): + @patch("%s.%s.add_directory_monitor" % (self.test_obj.__module__, + self.test_obj.__name__), + Mock()) + def inner(): + return TestPlugin.get_obj(self, core=core) + return inner() @patch("copy.copy") def test_get_additional_data(self, mock_copy): @@ -322,11 +319,11 @@ class TestProperties(TestPlugin, TestConnector): p = self.get_obj() metadata = Mock() - p.store.entries = {"foo.xml": Mock(), - "foo.yml": Mock()} + p.entries = {"foo.xml": Mock(), + "foo.yml": Mock()} rv = p.get_additional_data(metadata) expected = dict() - for name, entry in p.store.entries.items(): + for name, entry in p.entries.items(): entry.get_additional_data.assert_called_with(metadata) expected[name] = entry.get_additional_data.return_value self.assertItemsEqual(rv, expected) diff --git a/testsuite/Testsrc/Testlib/TestStatistics.py b/testsuite/Testsrc/Testlib/TestStatistics.py new file mode 100644 index 000000000..496cbac28 --- /dev/null +++ b/testsuite/Testsrc/Testlib/TestStatistics.py @@ -0,0 +1,44 @@ +import os +import sys +from mock import Mock, MagicMock, patch + +# add all parent testsuite directories to sys.path to allow (most) +# relative imports in python 2.4 +path = os.path.dirname(__file__) +while path != "/": + if os.path.basename(path).lower().startswith("test"): + sys.path.append(path) + if os.path.basename(path) == "testsuite": + break + path = os.path.dirname(path) +from common import * + +from Bcfg2.Statistics import * + + +class TestStatistic(Bcfg2TestCase): + def test_stat(self): + stat = Statistic("test", 1) + self.assertEqual(stat.get_value(), ("test", (1.0, 1.0, 1.0, 1))) + 
stat.add_value(10) + self.assertEqual(stat.get_value(), ("test", (1.0, 10.0, 5.5, 2))) + stat.add_value(100) + self.assertEqual(stat.get_value(), ("test", (1.0, 100.0, 37.0, 3))) + stat.add_value(12.345) + self.assertEqual(stat.get_value(), ("test", (1.0, 100.0, 30.83625, 4))) + stat.add_value(0.655) + self.assertEqual(stat.get_value(), ("test", (0.655, 100.0, 24.8, 5))) + + +class TestStatistics(Bcfg2TestCase): + def test_stats(self): + stats = Statistics() + self.assertEqual(stats.display(), dict()) + stats.add_value("test1", 1) + self.assertEqual(stats.display(), dict(test1=(1.0, 1.0, 1.0, 1))) + stats.add_value("test2", 1.23) + self.assertEqual(stats.display(), dict(test1=(1.0, 1.0, 1.0, 1), + test2=(1.23, 1.23, 1.23, 1))) + stats.add_value("test1", 10) + self.assertEqual(stats.display(), dict(test1=(1.0, 10.0, 5.5, 2), + test2=(1.23, 1.23, 1.23, 1))) diff --git a/testsuite/before_install.sh b/testsuite/before_install.sh index 884971e45..5f1a59aaf 100755 --- a/testsuite/before_install.sh +++ b/testsuite/before_install.sh @@ -2,9 +2,12 @@ # before_install script for Travis-CI +PYVER=$(python -c 'import sys;print(".".join(str(v) for v in sys.version_info[0:2]))') + sudo apt-get update -qq -sudo apt-get install -qq swig pylint libxml2-utils +sudo apt-get install -qq swig libxml2-utils if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then - sudo apt-get install -qq python-selinux python-pylibacl python-pyinotify \ - python-yaml yum + if [[ ${PYVER:0:1} == "2" ]]; then + sudo apt-get install -qq python-selinux python-pylibacl yum + fi fi diff --git a/testsuite/install.sh b/testsuite/install.sh index c1685f831..817ed5911 100755 --- a/testsuite/install.sh +++ b/testsuite/install.sh @@ -7,12 +7,16 @@ pip install -r testsuite/requirements.txt --use-mirrors PYVER=$(python -c 'import sys;print(".".join(str(v) for v in sys.version_info[0:2]))') if [[ "$WITH_OPTIONAL_DEPS" == "yes" ]]; then + pip install --use-mirrors genshi PyYAML pyinotify if [[ $PYVER == "2.5" ]]; then - # markdown 2.2.0 is broken on py2.5, so until 2.2.1 is released use 2.1 - pip install --use-mirrors 'markdown<2.2' - pip install --use-mirrors simplejson + # markdown 2.2+ doesn't work on py2.5 + pip install --use-mirrors simplejson 'markdown<2.2' + fi + if [[ ${PYVER:0:1} == "2" ]]; then + # django supports py3k, but South doesn't, and the django bits + # in bcfg2 require South + pip install cheetah 'django<1.5' South M2Crypto fi - pip install --use-mirrors genshi cheetah 'django<1.4' South M2Crypto else # python < 2.6 requires M2Crypto for SSL communication, not just # for encryption support diff --git a/testsuite/requirements.txt b/testsuite/requirements.txt index 8529b247f..2d6dbc557 100644 --- a/testsuite/requirements.txt +++ b/testsuite/requirements.txt @@ -2,4 +2,6 @@ lxml nose mock sphinx -daemon +pylint +pep8 +python-daemon diff --git a/tools/bcfg2_local.py b/tools/bcfg2_local.py index 2b9d39342..edb5a7101 100755 --- a/tools/bcfg2_local.py +++ b/tools/bcfg2_local.py @@ -64,6 +64,12 @@ class LocalClient(Client): def main(): optinfo = Bcfg2.Options.CLIENT_COMMON_OPTIONS optinfo.update(Bcfg2.Options.SERVER_COMMON_OPTIONS) + if 'bundle_quick' in optinfo: + # CLIENT_BUNDLEQUICK option uses -Q, just like the server repo + # option. the server repo is more important for this + # application. 
+        optinfo['bundle_quick'] = Bcfg2.Options.Option('bundlequick',
+                                                       default=False)
     setup = Bcfg2.Options.OptionParser(optinfo)
     setup.parse(sys.argv[1:])
diff --git a/tools/selinux_baseline.py b/tools/selinux_baseline.py
index b6997bb29..06f6e6b98 100755
--- a/tools/selinux_baseline.py
+++ b/tools/selinux_baseline.py
@@ -42,7 +42,7 @@ def main():
         baseline.append(lxml.etree.Comment("%s entries" % etype))
         extra = handler.FindExtra()
         for entry in extra:
-            entry.tag = "BoundSELinux"
+            entry.tag = "Bound%s" % etype
         baseline.extend(extra)

     print(lxml.etree.tostring(baseline, pretty_print=True))
diff --git a/tools/upgrade/1.3/migrate_perms_to_mode.py b/tools/upgrade/1.3/migrate_perms_to_mode.py
index e061558d3..18abffec2 100755
--- a/tools/upgrade/1.3/migrate_perms_to_mode.py
+++ b/tools/upgrade/1.3/migrate_perms_to_mode.py
@@ -13,6 +13,7 @@ def setmodeattr(elem):
         elem.set('mode', elem.get('perms'))
         del elem.attrib['perms']
         return True
+    return False


 def writefile(f, xdata):
@@ -32,7 +33,7 @@ def convertinfo(ifile):
         return
     found = False
     for i in xdata.findall('//Info'):
-        found = setmodeattr(i)
+        found |= setmodeattr(i)
     if found:
         writefile(ifile, xdata)
@@ -47,7 +48,7 @@ def convertstructure(structfile):
         return
     found = False
     for path in xdata.xpath('//BoundPath|//Path'):
-        found = setmodeattr(path)
+        found |= setmodeattr(path)
     if found:
         writefile(structfile, xdata)
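
A note on the test helpers used above. The GroupPatterns, Metadata, Probes, and Properties test changes all wrap object construction in an inner() function decorated with patchIf(...), so that lxml.etree.Element (or os.makedirs) is mocked only when the py3k compatibility shims have not already replaced it with a Mock. patchIf itself lives in the suite's common.py and is not part of this diff; a rough sketch of what such a conditional patcher might look like (the real helper's signature may differ):

    from mock import Mock, patch

    def patchIf(condition, target, *args, **kwargs):
        # Apply mock.patch(target, ...) only when condition is true;
        # otherwise return a decorator that leaves the function untouched.
        if condition:
            return patch(target, *args, **kwargs)
        return lambda func: func

Used as in the hunks above, e.g. @patchIf(not isinstance(lxml.etree.Element, Mock), "lxml.etree.Element", Mock()), the decorated inner() runs with the mock in place without double-patching an already mocked attribute.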
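The reworked probed.xml tests stop patching open() and instead patch lxml.etree.Element/SubElement with the StoringElement and StoringSubElement helpers, then assert on the FakeElement.getroottree().write() call. That only makes sense if Probes._write_data_xml now builds an element tree and hands serialization to lxml instead of writing the string itself. A minimal sketch of the write path the tests appear to expect; self.probedata and self.cgroups come from the tests, self.data (the plugin's datastore/<name> directory) is an assumption here, and the real method in src/lib/Bcfg2/Server/Plugins/Probes.py may differ in detail:

    import os
    import time
    import lxml.etree

    def _write_data_xml(self, _client):
        # Built with lxml.etree.Element/SubElement, which the test suite
        # swaps out for StoringElement/StoringSubElement.
        top = lxml.etree.Element("Probed")
        for client, probed in self.probedata.items():
            ctag = lxml.etree.SubElement(top, "Client", name=client,
                                         timestamp=str(int(time.time())))
            for probe, data in probed.items():
                lxml.etree.SubElement(ctag, "Probe", name=probe,
                                      value=str(data))
            for group in self.cgroups[client]:
                lxml.etree.SubElement(ctag, "Group", name=group)
        # The test asserts on exactly this call:
        # getroottree().write(<plugin data dir>/probed.xml)
        top.getroottree().write(os.path.join(self.data, "probed.xml"),
                                xml_declaration=False, pretty_print=True)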
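The new TestStatistics.py pins down the behaviour of Bcfg2.Statistics: each named statistic reports a (min, max, mean, count) tuple, and the expected values follow a running mean (after the fourth sample, for example, (1 + 10 + 100 + 12.345) / 4 = 30.83625). A sketch that reproduces the expected numbers; the class and method names come from the test itself, but the internals of the real module may be organized differently:

    class Statistic(object):
        # Track min, max, running mean, and sample count for one named statistic.
        def __init__(self, name, initial_value):
            self.name = name
            self.min = float(initial_value)
            self.max = float(initial_value)
            self.ave = float(initial_value)
            self.count = 1

        def add_value(self, value):
            self.min = min(self.min, value)
            self.max = max(self.max, value)
            self.count += 1
            # incremental mean: old mean weighted by the old sample count
            self.ave = ((self.ave * (self.count - 1)) + value) / self.count

        def get_value(self):
            return (self.name, (self.min, self.max, self.ave, self.count))


    class Statistics(object):
        # Collection of Statistic objects keyed by name.
        def __init__(self):
            self.data = dict()

        def add_value(self, name, value):
            if name not in self.data:
                self.data[name] = Statistic(name, value)
            else:
                self.data[name].add_value(value)

        def display(self):
            return dict(stat.get_value() for stat in self.data.values())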
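Finally, the migrate_perms_to_mode.py hunks fix two related bugs in the 1.3 upgrade script: setmodeattr() now returns False instead of falling off the end (returning None), and its callers accumulate the result with |= rather than plain assignment, so a file is written back out when any of its Info/Path elements was converted, not only when the last one happened to be. The explicit False also matters because False |= None would raise a TypeError. A small self-contained illustration of the difference; the sample XML is invented for the example, and the helper is a condensed version of the patched one (its exact guard is outside the hunk shown):

    import lxml.etree

    def setmodeattr(elem):
        # Rename a perms= attribute to mode= and report whether anything changed.
        if 'perms' in elem.attrib:
            elem.set('mode', elem.get('perms'))
            del elem.attrib['perms']
            return True
        return False

    XML = '<FileInfo><Info perms="0644"/><Info mode="0755"/></FileInfo>'

    # Old behaviour: plain assignment keeps only the last element's result,
    # so the converted first <Info> is forgotten and the file is never rewritten.
    found = False
    for info in lxml.etree.XML(XML).findall('.//Info'):
        found = setmodeattr(info)
    print(found)   # False

    # New behaviour: |= accumulates, so one converted element is enough.
    found = False
    for info in lxml.etree.XML(XML).findall('.//Info'):
        found |= setmodeattr(info)
    print(found)   # True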