[PATCH 2/2] Deal with config files and selections better
by Josh Adams
Config files and a dict of items to their selection status are now
attached to benchmark objects on import. Also allow the user to specify
active profile on import.
---
src/bin/secstate | 6 +-
src/secstate/main.py | 253 +++++++++++++++++++++++---------------------------
src/secstate/util.py | 99 +++++++++++---------
3 files changed, 173 insertions(+), 185 deletions(-)
diff --git a/src/bin/secstate b/src/bin/secstate
index 9692c0e..2348591 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -114,14 +114,14 @@ def import_content(arguments):
help="Imports the specified CPE content")
parser.add_option('-p', '--puppet', action='store_true', dest='puppet', default=False,
help="Imports the specified puppet content")
+ parser.add_option('--profile', action='store', type='string', dest='profile', default="__None__",
+ help="Imports the specified benchmark and sets the active profile")
(options, args) = parser.parse_args(arguments)
for arg in args:
- (benchmark, def_model) = sec_instance.import_content(arg, options.cpe, options.puppet, save=True)
+ (benchmark, def_model) = sec_instance.import_content(arg, options.cpe, options.puppet, save=True, active_profile=options.profile)
if (benchmark == None) and (def_model == None):
return -1
- oscap.oval.definition_model_free(def_model)
- oscap.xccdf.benchmark_free(benchmark)
def export(arguments):
parser = OptionParser(usage="secstate export [options] <benchmark> <file>")
diff --git a/src/secstate/main.py b/src/secstate/main.py
index 8948712..3dd93c8 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -31,7 +31,6 @@ import subprocess
import time
import mimetypes
import json
-import cProfile
import openscap_api as oscap
from secstate.util import *
@@ -47,8 +46,9 @@ class Secstate:
self.log.error("Could not create config directory: %(dir)s" % {'dir':self.conf_dir})
return (None, None)
- self.content = self.get_content_dict()
- self.content_configs = self.get_content_configs()
+ self.content = {}
+ self.content_configs = {}
+ self.load_content()
self.log = self.getLogger()
self.benchmark_dir = self.config.get('secstate', 'benchmark_dir')
@@ -88,28 +88,14 @@ class Secstate:
return log
- def get_content_dict(self):
- content = {}
+ def load_content(self):
conf_dir = self.config.get('secstate', 'conf_dir')
for conf_file in os.listdir(conf_dir):
id = os.path.splitext(conf_file)[0]
- config = ConfigParser.ConfigParser()
- fp = open(os.path.join(conf_dir, conf_file))
- config.readfp(fp)
+ self.content_configs[id] = os.path.join(conf_dir, conf_file)
+ config = load_config(os.path.join(conf_dir, conf_file))
content_file = config.get(id, 'file')
- content[id] = content_file
- fp.close()
-
- return content
-
- def get_content_configs(self):
- configs = {}
- conf_dir = self.config.get('secstate', 'conf_dir')
- for conf_file in os.listdir(conf_dir):
- id = os.path.splitext(conf_file)[0]
- configs[id] = os.path.join(conf_dir, conf_file)
-
- return configs
+ self.content[id] = content_file
def combine_def_models(self, target, source):
"""
@@ -174,6 +160,17 @@ class Secstate:
self.log.error("Definition model is invalid")
return (None, None)
+ oval_id = os.path.splitext(os.path.basename(oval_file))[0]
+
+ if self.content.has_key(oval_id):
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ if config.read(self.content_configs[oval_id]) == []:
+ self.log.error("Error loading config file: %(file)s" % {'file':self.content_configs[oval_id]})
+ return (None, None)
+
+ def_model.__dict__['config'] = config
+
if store_path:
if not os.path.isdir(store_path):
try:
@@ -197,7 +194,7 @@ class Secstate:
return (None, def_model)
- def import_benchmark(self, benchmark_file, oval_path="", store_path=None):
+ def import_benchmark(self, benchmark_file, oval_path="", store_path=None, changes=False, active_profile='__None__'):
"""
Function: Imports an XCCDF benchmark
Input: Source File, path to associated OVAL content
@@ -241,6 +238,32 @@ class Secstate:
profile.id = "Custom"
benchmark.add_profile(profile)
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ if self.content_configs.has_key(benchmark.id):
+ if config.read(self.content_configs[benchmark.id]) == []:
+ self.log.error("Error opening config file: %(file)s" % {'file':self.content_config[benchmark.id]})
+ return (None, None)
+ else:
+ config.add_section(benchmark.id)
+ config.set(benchmark.id, 'profile', active_profile)
+
+ benchmark.__dict__['config'] = config
+
+ if changes:
+ benchmark = apply_changes_profile(benchmark)
+
+ benchmark.__dict__['selections'] = {}
+ for item in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
+ benchmark.selections[item.id] = item.selected
+
+ current_profile = benchmark.config.get(benchmark.id, 'profile')
+ if current_profile != '__None__':
+ profile = benchmark.get_item(current_profile).to_profile()
+ prof_sel = get_profile_selections(benchmark, profile)
+ for key,val in prof_sel.items():
+ benchmark.selections[key] = val
+
if store_path != None:
id = get_benchmark_id(benchmark_file)
directory = os.path.join(bench_dir, id)
@@ -251,12 +274,10 @@ class Secstate:
try:
os.mkdir(directory)
shutil.copy(benchmark_file, directory)
- config = ConfigParser.ConfigParser()
- config.add_section(id)
- config.set(id, 'file', os.path.join(directory, os.path.basename(benchmark_file)))
- config.set(id, 'selected', True)
+ benchmark.config.set(id, 'file', os.path.join(directory, os.path.basename(benchmark_file)))
+ benchmark.config.set(id, 'selected', True)
conf_file = open(os.path.join(self.config.get('secstate', 'conf_dir'), id + ".cfg"), 'w')
- config.write(conf_file)
+ benchmark.config.write(conf_file)
conf_file.close()
for oval in list(set(oval_files)):
@@ -268,7 +289,7 @@ class Secstate:
return (benchmark, def_model)
- def import_zipped_content(self, zip, type, store_path, puppet):
+ def import_zipped_content(self, zip, type, store_path, puppet, changes=False, active_profile='__None__'):
"""
Function: Validate and copy content from zipped file to repository
Input: Zipped file contating content and bool whether it contains puppet content
@@ -324,7 +345,7 @@ class Secstate:
self.log.error("Could not find XCCDF benchmark in archive %(file)s", {'file':zip})
return (None, None)
- (benchmark, def_model) = self.import_benchmark(os.path.join(extract_path, xccdf), extract_path, store_path)
+ (benchmark, def_model) = self.import_benchmark(os.path.join(extract_path, xccdf), extract_path, store_path, changes, active_profile=active_profile)
if benchmark == None:
return (None, None)
@@ -333,7 +354,7 @@ class Secstate:
return (benchmark, def_model)
- def import_content(self, content, cpe=False, puppet=False, changes=True, save=False):
+ def import_content(self, content, cpe=False, puppet=False, changes=True, save=False, active_profile='__None__'):
"""
Function: Validates XCCDF/OVAL content and optionally saves it to the data store
Input: File containing content
@@ -353,19 +374,15 @@ class Secstate:
return (None, None)
if self.content.has_key(content):
- (benchmark, oval) = self.import_content(os.path.join(self.benchmark_dir, content, self.content[content]), save=False)
- if changes and (benchmark != None):
- #benchmark = apply_changes(benchmark, os.path.join(self.benchmark_dir, content, str(content + ".cfg")))
- benchmark = apply_changes_profile(benchmark, self.content_configs[content])
+ return self.import_content(self.content[content], cpe, puppet, changes, active_profile=active_profile)
- return (benchmark, oval)
+ if save:
+ store_path = self.config.get('secstate', 'benchmark_dir')
file_type = mimetypes.guess_type(content)
if file_type[0] == "text/xml":
if is_benchmark(content):
xccdf = True
- if save:
- store_path = self.config.get('secstate', 'benchmark_dir')
else:
oval = True
if save:
@@ -375,10 +392,10 @@ class Secstate:
return self.import_oval(content, store_path)
if xccdf:
- return self.import_benchmark(content, store_path=store_path, oval_path=os.path.dirname(content))
+ return self.import_benchmark(content, store_path=store_path, oval_path=os.path.dirname(content), changes=changes, active_profile=active_profile)
else:
- return self.import_zipped_content(content, file_type, store_path=self.config.get('secstate', 'benchmark_dir'), puppet=puppet)
+ return self.import_zipped_content(content, file_type, store_path=store_path, puppet=puppet, changes=changes, active_profile=active_profile)
def export(self, benchmark_id, new_file, original=False):
if not self.content.has_key(benchmark_id):
@@ -416,17 +433,13 @@ class Secstate:
self.remove_content(key)
elif self.content.has_key(benchmark_id):
- cfg = ConfigParser.ConfigParser()
- conf_file = self.content_configs[benchmark_id]
- fp = open(conf_file)
- cfg.readfp(fp)
- fp.close()
+ cfg = load_config(self.content_configs[benchmark_id])
try:
if os.path.split(cfg.get(benchmark_id, "file"))[0] != self.config.get('secstate', 'oval_dir'):
shutil.rmtree(os.path.split(cfg.get(benchmark_id, "file"))[0])
else:
os.remove(cfg.get(benchmark_id, "file"))
- os.remove(conf_file)
+ os.remove(self.content_configs[benchmark_id])
except IOError,e:
self.log.error("Error removing content: %(error)s" % {'error':e})
return False
@@ -444,21 +457,7 @@ class Secstate:
Output: Succes or failure
"""
sel_dict = {'selected':selected, 'message':message}
- bench_cfg = ConfigParser.ConfigParser()
- bench_cfg.optionxform = str
- conf_path = self.content_configs[benchmark_id]
- if os.path.isfile(conf_path):
- try:
- fp = open(conf_path)
- bench_cfg.readfp(fp)
- except IOError, e:
- self.log.error("Could not open config file: %(error)s" % {'error':e})
- return False
- fp.close()
-
- if not bench_cfg.has_section('Custom'):
- bench_cfg.add_section('Custom')
-
+
if not self.content.has_key(benchmark_id):
self.log.error("No benchmark %(id)s in datastore" % {'id':benchmark_id})
return False
@@ -469,13 +468,13 @@ class Secstate:
self.log.error("Error opening benchmark: %(file)s" % {'file':benchmark_id})
return False
else:
- bench_cfg.set(benchmark_id, 'selected', selected)
+ oval.config.set(benchmark_id, 'selected', selected)
self.log.debug("Set Oval file %(file)s to %(sel)s" % {'file':benchmark_id,
'sel':selected})
else:
if item_id == benchmark_id:
- bench_cfg.set(benchmark_id, 'selected', selected)
+ benchmark.config.set(benchmark_id, 'selected', selected)
self.log.debug("Setting %(id)s to %(val)s" % {'id':benchmark_id,
'val':selected})
item = benchmark.to_item()
@@ -488,37 +487,44 @@ class Secstate:
return False
if item.type == oscap.xccdf.XCCDF_PROFILE:
- bench_cfg.set(benchmark_id, 'profile', item_id)
+ benchmark.config.set(benchmark_id, 'profile', item_id)
self.log.debug("Setting active profile to %(id)s" % {'id':item_id})
else:
- if bench_cfg.has_option(benchmark_id, 'profile'):
- active_profile = bench_cfg.get(benchmark_id, 'profile')
+ if benchmark.config.has_option(benchmark_id, 'profile'):
+ active_profile = benchmark.config.get(benchmark_id, 'profile')
if active_profile != "Custom":
- bench_cfg.set('Custom', 'extends', active_profile)
+ if not benchmark.config.has_section("Custom"):
+ benchmark.config.add_section("Custom")
+ benchmark.config.set('Custom', 'extends', active_profile)
if item.type != oscap.xccdf.XCCDF_BENCHMARK:
- bench_cfg.set('Custom', item_id, json.dumps(sel_dict))
+ benchmark.config.set('Custom', item_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':item_id,
'val':selected})
if selected:
parent = item.parent
while parent.id != benchmark_id:
- bench_cfg.set('Custom', parent_id, json.dumps(sel_dict))
+ benchmark.config.set('Custom', parent_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':parent_id,
'val':selected})
parent = parent.parent
- bench_cfg.set(benchmark_id, 'profile', 'Custom')
+ benchmark.config.set(benchmark_id, 'profile', 'Custom')
if recurse:
if (item.type == oscap.xccdf.XCCDF_GROUP) or (item.type == oscap.xccdf.XCCDF_BENCHMARK):
for sub in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, item.content):
- bench_cfg.set('Custom', sub.id, json.dumps(sel_dict))
+ benchmark.config.set('Custom', sub.id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':sub.id,
'val':selected})
- fp = open(conf_path, 'w')
- bench_cfg.write(fp)
- fp.close()
+ try:
+ fp = open(self.content_configs[benchmark_id], 'w')
+ benchmark.config.write(fp)
+ fp.close()
+ except IOError, e:
+ self.log.error("Error saving changes: %(err)s" % {'err':e})
+ return False
+
return True
def save_profile(self, benchmark_id, profile_name):
@@ -526,12 +532,7 @@ class Secstate:
self.log.error("No benchmark named %(id)s has been imported" % {'id':benchmark_id})
return False
- bench_cfg = ConfigParser.ConfigParser()
- bench_cfg.optionxform = str
- fp = open(self.content_configs[benchmark_id])
- bench_cfg.readfp(fp)
- fp.close()
-
+ bench_cfg = load_config(self.content_configs[benchmark_id])
if bench_cfg.has_section("Custom"):
bench_cfg.add_section(profile_name)
for opt,val in bench_cfg.items("Custom"):
@@ -542,9 +543,13 @@ class Secstate:
self.log.error("No changes have been made to the current profile")
return False
- fp = open(self.content_configs[benchmark_id], 'w')
- bench_cfg.write(fp)
- fp.close()
+ try:
+ fp = open(self.content_configs[benchmark_id], 'w')
+ bench_cfg.write(fp)
+ fp.close()
+ except IOError, e:
+ self.log.error("Error saving changes: %(err)s" % {'err':e})
+ return False
return True
@@ -560,7 +565,6 @@ class Secstate:
benchmark = None
res_model = None
res_benchmark = None
- config = None
if args == []:
args = self.content.keys()
@@ -573,11 +577,11 @@ class Secstate:
else:
if self.content.has_key(arg):
- config = ConfigParser.ConfigParser()
- fp = open(self.content_configs[arg])
- config.readfp(fp)
- fp.close()
- if not all and (not config.getboolean(arg, 'selected')) and (len(args) > 1):
+ if benchmark == None:
+ scanned_content = def_model
+ else:
+ scanned_content = benchmark
+ if not all and (not scanned_content.config.getboolean(arg, 'selected')) and (len(args) > 1):
print "Skipping %(id)s" % {'id':arg}
ret = True
continue
@@ -586,10 +590,10 @@ class Secstate:
sess = oscap.oval.agent_new_session(def_model)
if benchmark != None:
- # Set profile to default found in config file
- if (profile == None) and (config != None):
- if config.has_option(arg, 'profile'):
- profile = config.get(arg, 'profile')
+ # Set profile to default found in benchmark.config.file
+ if (profile == None) and (benchmark.__dict__.has_key('config')):
+ if benchmark.config.has_option(arg, 'profile'):
+ profile = benchmark.config.get(arg, 'profile')
else:
profile = 'Custom'
@@ -631,14 +635,14 @@ class Secstate:
print "In benchmark %(bench)s:" % {'bench':key}
- for item in xccdf_get_itesm(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
+ for item in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
title = None
description = None
- if len(item.titles) > 0:
- title = item.titles[0].text
+ if len(item.title) > 0:
+ title = item.title[0].text
- if len(item.descriptions) > 0:
+ if len(item.description) > 0:
description = item.description[0].text
if (title != None) and (description != None):
@@ -686,20 +690,16 @@ class Secstate:
for title in item.title:
print "\tTitle: '%(title)s'" % {'title':title.text}
- for description in item.descriptions:
+ for description in item.description:
print "\tDescription: %(desc)s" % {'desc':description.text}
print "\tSelected: %(sel)s" % {'sel':item.selected}
type = item.type
if type == oscap.xccdf.XCCDF_BENCHMARK:
- bench_cfg = ConfigParser.ConfigParser()
- fp = open(self.content_configs[key])
- bench_cfg.readfp(fp)
- fp.close()
active_profile = None
- if bench_cfg.has_option(key, 'profile'):
- active_profile = bench_cfg.get(key, 'profile')
+ if benchmark.config.has_option(key, 'profile'):
+ active_profile = benchmark.config.get(key, 'profile')
if len(benchmark.profiles) > 0:
print "\tProfiles:"
for profile in benchmark.profiles:
@@ -730,14 +730,15 @@ class Secstate:
return True
- def sublist(self, benchmark, bench_cfg, def_model, arg, recurse, show_all, selects={}, tabs=0):
+ def sublist(self, benchmark, def_model, arg, recurse, show_all, tabs=0):
tabstr = "\t" * tabs
selected = ""
profile = ""
if benchmark == None:
if self.content.has_key(arg):
- if bench_cfg.getboolean(arg, 'selected'):
+ print dir(def_model)
+ if def_model.config.getboolean(arg, 'selected'):
if show_all:
selected = "[X]"
else:
@@ -753,22 +754,15 @@ class Secstate:
item = None
if arg == benchmark.id:
item = benchmark.to_item()
- is_selected = bench_cfg.getboolean(arg, 'selected')
- if bench_cfg.has_option(arg, 'profile'):
- profile = ", Profile: '%s'" % bench_cfg.get(arg, 'profile')
- else:
- profile = ", Profile: None"
+ is_selected = benchmark.config.getboolean(arg, 'selected')
+ profile = ", Profile: '%s'" % benchmark.config.get(arg, 'profile')
+
else:
item = benchmark.get_item(arg)
if item == None:
- return self.sublist(None, bench_cfg, def_model, arg, recurse, show_all, selects, tabs)
-
- is_selected = item.selected
-
- try:
- is_selected = selects[item.id]
- except KeyError, e:
- pass
+ return self.sublist(None, def_model, arg, recurse, show_all, tabs)
+ else:
+ is_selected = benchmark.selections[item.id]
for title in item.title:
if show_all:
@@ -790,7 +784,7 @@ class Secstate:
type = item.type
if (type == oscap.xccdf.XCCDF_GROUP) or (type == oscap.xccdf.XCCDF_BENCHMARK):
for sub in item.content:
- self.sublist(benchmark, bench_cfg, def_model, sub.id, recurse, show_all, selects, tabs+1)
+ self.sublist(benchmark, def_model, sub.id, recurse, show_all, tabs+1)
def list_content(self, arg=None, recurse=False, show_all=False):
@@ -802,29 +796,12 @@ class Secstate:
self.log.error("Error loading benchmark: %(id)s" % {'id':key})
return False
- config = ConfigParser.ConfigParser()
- fp = open(self.content_configs[key])
- config.readfp(fp)
- fp.close()
-
- selects = {}
- if benchmark != None:
- if config.has_option(benchmark.id, 'profile'):
- prof = benchmark.get_item(config.get(benchmark.id, "profile"))
- if prof == None:
- self.log.error("Error loading profile %(prof)s" % {'prof':config.get(benchmark.id, 'profile')})
- return False
- prof = prof.to_profile()
-
- for select in prof.selects:
- selects[select.item] = select.selected
-
if (arg == None) or (arg == key):
- ret = self.sublist(benchmark, config, def_model, key, recurse, show_all, selects)
+ ret = self.sublist(benchmark, def_model, key, recurse, show_all)
else:
if not self.content.has_key(arg):
- ret = self.sublist(benchmark, config, def_model, arg, recurse, show_all, selects)
+ ret = self.sublist(benchmark, def_model, arg, recurse, show_all)
return ret
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 783ed9e..c3b52c0 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -39,6 +39,19 @@ class SecstateException(Exception):
def __str__(self):
return str(self.reason)
+def load_config(conf_file):
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ try:
+ fp = open(conf_file)
+ config.readfp(fp)
+ fp.close()
+ except IOError, e:
+ sys.stderr.write("Error opening config file: %(file)s" % {'file':conf_file})
+ return None
+
+ return config
+
def xccdf_reporter(msg, usr):
result = oscap.common.reporter_message_get_user2num(msg)
if result == oscap.xccdf.XCCDF_RESULT_PASS:
@@ -70,7 +83,7 @@ def evaluate_xccdf(benchmark, url_XCCDF, sess, s_profile=None, all=False, verbos
if (s_profile != None):
policy = policy_model.get_policy_by_id(s_profile)
else:
- policies = policy_modea.policiesl
+ policies = policy_model.policies
if len(policies) > 0:
policy = policies[0]
@@ -99,9 +112,8 @@ def evaluate_xccdf(benchmark, url_XCCDF, sess, s_profile=None, all=False, verbos
ritem.add_title(title)
ritem.start_time = time.time()
if policy != None:
- id = policy.profile.id
- if id != None:
- ritem.set_profile(id)
+ if policy.profile != None:
+ ritem.set_profile(policy.profile.id)
oscap.oval.agent_export_sysinfo_to_xccdf_result(sess, ritem)
for model in benchmark.models:
@@ -121,7 +133,6 @@ def evaluate_xccdf(benchmark, url_XCCDF, sess, s_profile=None, all=False, verbos
"Informational:\t%(info)s\n" \
"Unknown:\t%(unknown)s\n" % res_dict
- print "HERE"
results_benchmark = benchmark.clone()
results_benchmark.add_result(oscap.xccdf.result_clone(ritem))
res_model = oscap.oval.agent_get_results_model(sess)
@@ -315,50 +326,50 @@ def xccdf_rule_get_defs(rule):
return defs
-def apply_changes_profile(benchmark, conf):
- config = ConfigParser.ConfigParser()
- config.optionxform = str
- if os.path.isfile(conf):
- try:
- fp = open(conf)
- config.readfp(fp)
- except IOError,e:
- sys.stderr.write("Error opening config file: %(err)s\n" % {'err':e})
- return None
- fp.close()
-
- for section in config.sections():
- if section != benchmark.id:
- prof = oscap.xccdf.profile_new()
- if config.has_option(section, 'extends'):
- original_prof = benchmark.get_item(config.get(section, 'extends')).to_profile()
- if len(original_prof.title) > 0:
- new_title = oscap.common.text_new()
- new_title.text = "-- Customized --" + original_prof.title[0].text
- prof.add_title(new_title)
- prof.extends = config.get(section, 'extends')
- else:
+def apply_changes_profile(benchmark):
+ for section in benchmark.config.sections():
+ if section != benchmark.id:
+ prof = oscap.xccdf.profile_new()
+ if benchmark.config.has_option(section, 'extends'):
+ original_prof = benchmark.get_item(benchmark.config.get(section, 'extends')).to_profile()
+ if len(original_prof.title) > 0:
new_title = oscap.common.text_new()
- new_title.text = "Customized profile from secstate"
+ new_title.text = "-- Customized --" + original_prof.title[0].text
prof.add_title(new_title)
- prof.id = section
-
- for id,val in config.items(section):
- if id != 'extends':
- sel_dict = json.loads(val)
- select = oscap.xccdf.select_new()
- select.item = id
- select.selected = sel_dict['selected']
- if sel_dict['message']:
- text = oscap.common.text_new()
- text.text = str(sel_dict['message'])
- select.add_remark(text)
- prof.add_select(select)
-
- benchmark.add_profile(prof)
+ prof.extends = benchmark.config.get(section, 'extends')
+ else:
+ new_title = oscap.common.text_new()
+ new_title.text = "Customized profile from secstate"
+ prof.add_title(new_title)
+ prof.id = section
+
+ for id,val in benchmark.config.items(section):
+ if id != 'extends':
+ sel_dict = json.loads(val)
+ select = oscap.xccdf.select_new()
+ select.item = id
+ select.selected = sel_dict['selected']
+ if sel_dict['message']:
+ text = oscap.common.text_new()
+ text.text = str(sel_dict['message'])
+ select.add_remark(text)
+ prof.add_select(select)
+
+ benchmark.add_profile(prof)
return benchmark
+def get_profile_selections(benchmark, profile):
+ selections = {}
+ if profile.extends != None:
+ selections.update(get_profile_selections(benchmark, benchmark.get_item(profile.extends).to_profile()))
+
+ for sel in profile.selects:
+ selections[sel.item] = sel.selected
+
+ return selections
+
+
def xccdf_get_fixes(benchmark, ignore_ids=[]):
"""
Function: Get all fixes for rules in the XCCDF document
--
1.7.2
13 years, 10 months
[PATCH 1/2] Added the ability to save profiles
by Josh Adams
The user can now save customizations to a named profile, which can be
loaded later.
Fixes bug #7328
---
src/bin/secstate | 10 +++++++
src/secstate/main.py | 42 ++++++++++++++++++++++++-----
src/secstate/util.py | 72 ++++++++++++++++++++-----------------------------
3 files changed, 74 insertions(+), 50 deletions(-)
diff --git a/src/bin/secstate b/src/bin/secstate
index e4bc7f0..9692c0e 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -41,6 +41,7 @@ Sub-commands:
export -- Exports imported content along with any changes that have been made
select -- Sets a portion of the benchmark (group/rule) to be selected
deselect -- Sets a portion of the benchmark (group/rule) to be deselected
+ save -- Saves changes made to the current benchmark to a named profile
list -- List the imported benchmarks
show -- Shows the information associated with a group, rule, or definition id
search -- Search through imported content
@@ -98,6 +99,9 @@ def main():
elif subcommand == 'remediate':
remediate(sys.argv[arg_num:])
+
+ elif subcommand == 'save':
+ save_profile(sys.argv[arg_num:])
else:
sys.stderr.write("Uknown subcommand: %(command)s" % {'command':subcommand})
@@ -250,6 +254,12 @@ def show(arguments):
return -1
return 0
+def save_profile(arguments):
+ parser = OptionParser(usage="secstate save [options] <benchmark> <profile name>")
+ (options, args) = parser.parse_args(arguments)
+ if not sec_instance.save_profile(args[0], args[1]):
+ return -1
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/src/secstate/main.py b/src/secstate/main.py
index d38e96e..8948712 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -456,8 +456,8 @@ class Secstate:
return False
fp.close()
- if not bench_cfg.has_section('selections'):
- bench_cfg.add_section('selections')
+ if not bench_cfg.has_section('Custom'):
+ bench_cfg.add_section('Custom')
if not self.content.has_key(benchmark_id):
self.log.error("No benchmark %(id)s in datastore" % {'id':benchmark_id})
@@ -494,16 +494,16 @@ class Secstate:
if bench_cfg.has_option(benchmark_id, 'profile'):
active_profile = bench_cfg.get(benchmark_id, 'profile')
if active_profile != "Custom":
- bench_cfg.set(benchmark_id, 'extends', active_profile)
+ bench_cfg.set('Custom', 'extends', active_profile)
if item.type != oscap.xccdf.XCCDF_BENCHMARK:
- bench_cfg.set('selections', item_id, json.dumps(sel_dict))
+ bench_cfg.set('Custom', item_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':item_id,
'val':selected})
if selected:
parent = item.parent
while parent.id != benchmark_id:
- bench_cfg.set('selections', parent_id, json.dumps(sel_dict))
+ bench_cfg.set('Custom', parent_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':parent_id,
'val':selected})
parent = parent.parent
@@ -512,7 +512,7 @@ class Secstate:
if recurse:
if (item.type == oscap.xccdf.XCCDF_GROUP) or (item.type == oscap.xccdf.XCCDF_BENCHMARK):
for sub in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, item.content):
- bench_cfg.set('selections', sub.id, json.dumps(sel_dict))
+ bench_cfg.set('Custom', sub.id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':sub.id,
'val':selected})
@@ -521,6 +521,34 @@ class Secstate:
fp.close()
return True
+ def save_profile(self, benchmark_id, profile_name):
+ if not self.content.has_key(benchmark_id):
+ self.log.error("No benchmark named %(id)s has been imported" % {'id':benchmark_id})
+ return False
+
+ bench_cfg = ConfigParser.ConfigParser()
+ bench_cfg.optionxform = str
+ fp = open(self.content_configs[benchmark_id])
+ bench_cfg.readfp(fp)
+ fp.close()
+
+ if bench_cfg.has_section("Custom"):
+ bench_cfg.add_section(profile_name)
+ for opt,val in bench_cfg.items("Custom"):
+ bench_cfg.set(profile_name, opt, val)
+ bench_cfg.remove_section("Custom")
+ bench_cfg.set(benchmark_id, 'profile', profile_name)
+ else:
+ self.log.error("No changes have been made to the current profile")
+ return False
+
+ fp = open(self.content_configs[benchmark_id], 'w')
+ bench_cfg.write(fp)
+ fp.close()
+
+ return True
+
+
def audit(self, interpreter, args, profile=None, verbose=False, all=False, xml=None, html=None):
"""
Function: Run an audit on the system agains the given definition model
@@ -784,7 +812,7 @@ class Secstate:
if config.has_option(benchmark.id, 'profile'):
prof = benchmark.get_item(config.get(benchmark.id, "profile"))
if prof == None:
- self.log.error("Error loading profile %(prof)s" % {'prof':profile})
+ self.log.error("Error loading profile %(prof)s" % {'prof':config.get(benchmark.id, 'profile')})
return False
prof = prof.to_profile()
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 214e782..783ed9e 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -315,24 +315,6 @@ def xccdf_rule_get_defs(rule):
return defs
-def apply_changes(benchmark, conf):
- config = ConfigParser.ConfigParser()
- config.optionxform = str
- if os.path.isfile(conf):
- try:
- fp = open(conf)
- config.readfp(fp)
- except IOError,e:
- sys.stderr.write("Error opening config file: %(err)s\n" % {'err':e})
- return None
- fp.close()
-
- for id in config.options("selections"):
- item = benchmark.get_item(id)
- item.selected = config.getboolean('selections', id)
-
- return benchmark
-
def apply_changes_profile(benchmark, conf):
config = ConfigParser.ConfigParser()
config.optionxform = str
@@ -345,31 +327,35 @@ def apply_changes_profile(benchmark, conf):
return None
fp.close()
- if config.has_option(benchmark.id, 'extends'):
- prof = benchmark.get_item(config.get(benchmark.id, 'extends')).to_profile().clone()
- titles = prof.title
- if len(titles) > 0:
- title = titles[0]
- original_text = title.text
- title.text = "-- Customized --" + original_text
- prof.extends = config.get(benchmark.id, 'extends')
- else:
- prof = oscap.xccdf.profile_new()
- prof.id = "Custom"
-
- if config.has_section("selections"):
- for id in config.options("selections"):
- sel_dict = json.loads(config.get('selections', id))
- select = oscap.xccdf.select_new()
- select.item = id
- select.selected = sel_dict['selected']
- if sel_dict['message']:
- text = oscap.common.text_new()
- text.text = str(sel_dict['message'])
- select.add_remark(text)
- prof.add_select(select)
-
- benchmark.add_profile(prof)
+ for section in config.sections():
+ if section != benchmark.id:
+ prof = oscap.xccdf.profile_new()
+ if config.has_option(section, 'extends'):
+ original_prof = benchmark.get_item(config.get(section, 'extends')).to_profile()
+ if len(original_prof.title) > 0:
+ new_title = oscap.common.text_new()
+ new_title.text = "-- Customized --" + original_prof.title[0].text
+ prof.add_title(new_title)
+ prof.extends = config.get(section, 'extends')
+ else:
+ new_title = oscap.common.text_new()
+ new_title.text = "Customized profile from secstate"
+ prof.add_title(new_title)
+ prof.id = section
+
+ for id,val in config.items(section):
+ if id != 'extends':
+ sel_dict = json.loads(val)
+ select = oscap.xccdf.select_new()
+ select.item = id
+ select.selected = sel_dict['selected']
+ if sel_dict['message']:
+ text = oscap.common.text_new()
+ text.text = str(sel_dict['message'])
+ select.add_remark(text)
+ prof.add_select(select)
+
+ benchmark.add_profile(prof)
return benchmark
--
1.7.2
13 years, 10 months
[PATCH] Update Makefile for ifdefined puppet module
by Francisco Slavin
---
Makefile | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/Makefile b/Makefile
index c8e2671..83847a3 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ CLIP_DIR := $(CWD)../
RPM_PACKAGER := Tresys Technology, LLC
QUIET ?= 0
-MODULE_LIST = file_perms pam
+MODULE_LIST = file_perms pam ifdefined
# Add additional subdirs for Make recursion here.
# The all, clean, and bare targets will be called on these directories
--
1.7.2
13 years, 10 months
[PATCH 1/2] Added puppet ifdefined module, updated spec file and license info accordingly
by Francisco Slavin
---
LICENSE | 2 +-
dist/secstate.spec | 2 +
remediation/puppet-modules/ifdefined/LICENSE | 10 +++++++
remediation/puppet-modules/ifdefined/README | 26 ++++++++++++++++++++
.../plugins/puppet/parser/functions/ifdefined.rb | 21 ++++++++++++++++
5 files changed, 60 insertions(+), 1 deletions(-)
create mode 100644 remediation/puppet-modules/ifdefined/LICENSE
create mode 100644 remediation/puppet-modules/ifdefined/README
create mode 100644 remediation/puppet-modules/ifdefined/plugins/puppet/parser/functions/ifdefined.rb
diff --git a/LICENSE b/LICENSE
index 1dd738f..16dd111 100644
--- a/LICENSE
+++ b/LICENSE
@@ -20,4 +20,4 @@ The pam puppet module:
BSD:
The OVAL to XHTML transform:
/etc/secstate/results_to_html.xsl
-
+ /usr/share/puppet/modules/ifdefined/*
diff --git a/dist/secstate.spec b/dist/secstate.spec
index 0ab9240..bf24209 100644
--- a/dist/secstate.spec
+++ b/dist/secstate.spec
@@ -68,6 +68,8 @@ rm -rf $RPM_BUILD_ROOT
# BSD
%config(noreplace) %{_sysconfdir}/secstate/results_to_html.xsl
+%dir /usr/share/puppet/modules/ifdefined
+/usr/share/puppet/modules/ifdefined/*
%changelog
* Fri Jul 16 2010 Marshall Miller <mmiller(a)tresys.com> 0.3-8
diff --git a/remediation/puppet-modules/ifdefined/LICENSE b/remediation/puppet-modules/ifdefined/LICENSE
new file mode 100644
index 0000000..d170e97
--- /dev/null
+++ b/remediation/puppet-modules/ifdefined/LICENSE
@@ -0,0 +1,10 @@
+Copyright (c) 2010, Michael DeHaan, Puppet Labs
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ * Neither the name of the Organization nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/remediation/puppet-modules/ifdefined/README b/remediation/puppet-modules/ifdefined/README
new file mode 100644
index 0000000..6946e32
--- /dev/null
+++ b/remediation/puppet-modules/ifdefined/README
@@ -0,0 +1,26 @@
+This is a puppet module for using a variable if it is defined, and otherwise not
+doing anything with the variable.
+
+Example:
+
+file { "/path/to/foo.txt":
+ mode => ifdefined('mode')
+}
+
+This will do nothing if an external nodes tool does not set $mode, and if it is
+set, will use the mode, exactly as specified.
+
+This is a shortcut around this syntax:
+
+file { "/path/to/foo.txt":
+ mode ? {
+ '' : undef,
+ default : $mode
+ }
+}
+
+This will allow the file to stay as set on the system unless explicitly set,
+in other words.
+
+Michael DeHaan <michael(a)puppetlabs.com>
+
diff --git a/remediation/puppet-modules/ifdefined/plugins/puppet/parser/functions/ifdefined.rb b/remediation/puppet-modules/ifdefined/plugins/puppet/parser/functions/ifdefined.rb
new file mode 100644
index 0000000..2facc22
--- /dev/null
+++ b/remediation/puppet-modules/ifdefined/plugins/puppet/parser/functions/ifdefined.rb
@@ -0,0 +1,21 @@
+# ifdefined -- if a variable is defined, use it, otherwise use undef
+#
+# which means, for instance, "if this variable is defined, set the mode to it,
+# otherwise do nothing"
+#
+# allows:
+# $x = ifdefined('foo')
+#
+# Michael DeHaan <michael(a)puppetlabs.com>
+
+module Puppet::Parser::Functions
+ newfunction(:ifdefined, :type => :rvalue) do |args|
+ key = args[0]
+ value = lookupvar(key)
+ if value == ''
+ value = :undef # not nil
+ end
+ value
+ end
+end
+
--
1.7.2
13 years, 10 months
[PATCH] fixes remediation for the new openscap api
by mkeeler@tresys.com
From: Matt Keeler <mkeeler(a)tresys.com>
---
src/secstate/util.py | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 214e782..05284bf 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -384,7 +384,7 @@ def xccdf_get_fixes(benchmark, ignore_ids=[]):
fixes = []
for rule in rules:
if rule.id not in ignore_ids:
- fixes.extend(xccdf_fix_list(rule.fixes))
+ fixes.extend(rule.fixes)
return fixes
def item_get_type_str(item):
@@ -466,7 +466,7 @@ def dereference_sub_elements(fix, benchmark):
sub_element_re = r'<sub\s+.*idref="(.*?)"\s*/\s*>'
replacement_re = r'<sub\s+.*idref="%s"\s*/\s*>'
- content = fix._content
+ content = fix.content
ids = [m.group(1) for m in re.finditer(sub_element_re, content)]
for id in ids:
#lookup the actual value the id points too
--
1.6.5.2
13 years, 10 months
[PATCH] Added the ability to save profiles
by Josh Adams
The user can now save customizations to a named profile, which can be
loaded later.
Fixes bug #7328
---
src/bin/secstate | 10 +++++++
src/secstate/main.py | 42 ++++++++++++++++++++++++-----
src/secstate/util.py | 72 ++++++++++++++++++++-----------------------------
3 files changed, 74 insertions(+), 50 deletions(-)
diff --git a/src/bin/secstate b/src/bin/secstate
index e4bc7f0..9692c0e 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -41,6 +41,7 @@ Sub-commands:
export -- Exports imported content along with any changes that have been made
select -- Sets a portion of the benchmark (group/rule) to be selected
deselect -- Sets a portion of the benchmark (group/rule) to be deselected
+ save -- Saves changes made to the current benchmark to a named profile
list -- List the imported benchmarks
show -- Shows the information associated with a group, rule, or definition id
search -- Search through imported content
@@ -98,6 +99,9 @@ def main():
elif subcommand == 'remediate':
remediate(sys.argv[arg_num:])
+
+ elif subcommand == 'save':
+ save_profile(sys.argv[arg_num:])
else:
sys.stderr.write("Uknown subcommand: %(command)s" % {'command':subcommand})
@@ -250,6 +254,12 @@ def show(arguments):
return -1
return 0
+def save_profile(arguments):
+ parser = OptionParser(usage="secstate save [options] <benchmark> <profile name>")
+ (options, args) = parser.parse_args(arguments)
+ if not sec_instance.save_profile(args[0], args[1]):
+ return -1
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/src/secstate/main.py b/src/secstate/main.py
index d38e96e..8948712 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -456,8 +456,8 @@ class Secstate:
return False
fp.close()
- if not bench_cfg.has_section('selections'):
- bench_cfg.add_section('selections')
+ if not bench_cfg.has_section('Custom'):
+ bench_cfg.add_section('Custom')
if not self.content.has_key(benchmark_id):
self.log.error("No benchmark %(id)s in datastore" % {'id':benchmark_id})
@@ -494,16 +494,16 @@ class Secstate:
if bench_cfg.has_option(benchmark_id, 'profile'):
active_profile = bench_cfg.get(benchmark_id, 'profile')
if active_profile != "Custom":
- bench_cfg.set(benchmark_id, 'extends', active_profile)
+ bench_cfg.set('Custom', 'extends', active_profile)
if item.type != oscap.xccdf.XCCDF_BENCHMARK:
- bench_cfg.set('selections', item_id, json.dumps(sel_dict))
+ bench_cfg.set('Custom', item_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':item_id,
'val':selected})
if selected:
parent = item.parent
while parent.id != benchmark_id:
- bench_cfg.set('selections', parent_id, json.dumps(sel_dict))
+ bench_cfg.set('Custom', parent_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':parent_id,
'val':selected})
parent = parent.parent
@@ -512,7 +512,7 @@ class Secstate:
if recurse:
if (item.type == oscap.xccdf.XCCDF_GROUP) or (item.type == oscap.xccdf.XCCDF_BENCHMARK):
for sub in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, item.content):
- bench_cfg.set('selections', sub.id, json.dumps(sel_dict))
+ bench_cfg.set('Custom', sub.id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':sub.id,
'val':selected})
@@ -521,6 +521,34 @@ class Secstate:
fp.close()
return True
+ def save_profile(self, benchmark_id, profile_name):
+ if not self.content.has_key(benchmark_id):
+ self.log.error("No benchmark named %(id)s has been imported" % {'id':benchmark_id})
+ return False
+
+ bench_cfg = ConfigParser.ConfigParser()
+ bench_cfg.optionxform = str
+ fp = open(self.content_configs[benchmark_id])
+ bench_cfg.readfp(fp)
+ fp.close()
+
+ if bench_cfg.has_section("Custom"):
+ bench_cfg.add_section(profile_name)
+ for opt,val in bench_cfg.items("Custom"):
+ bench_cfg.set(profile_name, opt, val)
+ bench_cfg.remove_section("Custom")
+ bench_cfg.set(benchmark_id, 'profile', profile_name)
+ else:
+ self.log.error("No changes have been made to the current profile")
+ return False
+
+ fp = open(self.content_configs[benchmark_id], 'w')
+ bench_cfg.write(fp)
+ fp.close()
+
+ return True
+
+
def audit(self, interpreter, args, profile=None, verbose=False, all=False, xml=None, html=None):
"""
Function: Run an audit on the system agains the given definition model
@@ -784,7 +812,7 @@ class Secstate:
if config.has_option(benchmark.id, 'profile'):
prof = benchmark.get_item(config.get(benchmark.id, "profile"))
if prof == None:
- self.log.error("Error loading profile %(prof)s" % {'prof':profile})
+ self.log.error("Error loading profile %(prof)s" % {'prof':config.get(benchmark.id, 'profile')})
return False
prof = prof.to_profile()
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 214e782..783ed9e 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -315,24 +315,6 @@ def xccdf_rule_get_defs(rule):
return defs
-def apply_changes(benchmark, conf):
- config = ConfigParser.ConfigParser()
- config.optionxform = str
- if os.path.isfile(conf):
- try:
- fp = open(conf)
- config.readfp(fp)
- except IOError,e:
- sys.stderr.write("Error opening config file: %(err)s\n" % {'err':e})
- return None
- fp.close()
-
- for id in config.options("selections"):
- item = benchmark.get_item(id)
- item.selected = config.getboolean('selections', id)
-
- return benchmark
-
def apply_changes_profile(benchmark, conf):
config = ConfigParser.ConfigParser()
config.optionxform = str
@@ -345,31 +327,35 @@ def apply_changes_profile(benchmark, conf):
return None
fp.close()
- if config.has_option(benchmark.id, 'extends'):
- prof = benchmark.get_item(config.get(benchmark.id, 'extends')).to_profile().clone()
- titles = prof.title
- if len(titles) > 0:
- title = titles[0]
- original_text = title.text
- title.text = "-- Customized --" + original_text
- prof.extends = config.get(benchmark.id, 'extends')
- else:
- prof = oscap.xccdf.profile_new()
- prof.id = "Custom"
-
- if config.has_section("selections"):
- for id in config.options("selections"):
- sel_dict = json.loads(config.get('selections', id))
- select = oscap.xccdf.select_new()
- select.item = id
- select.selected = sel_dict['selected']
- if sel_dict['message']:
- text = oscap.common.text_new()
- text.text = str(sel_dict['message'])
- select.add_remark(text)
- prof.add_select(select)
-
- benchmark.add_profile(prof)
+ for section in config.sections():
+ if section != benchmark.id:
+ prof = oscap.xccdf.profile_new()
+ if config.has_option(section, 'extends'):
+ original_prof = benchmark.get_item(config.get(section, 'extends')).to_profile()
+ if len(original_prof.title) > 0:
+ new_title = oscap.common.text_new()
+ new_title.text = "-- Customized --" + original_prof.title[0].text
+ prof.add_title(new_title)
+ prof.extends = config.get(section, 'extends')
+ else:
+ new_title = oscap.common.text_new()
+ new_title.text = "Customized profile from secstate"
+ prof.add_title(new_title)
+ prof.id = section
+
+ for id,val in config.items(section):
+ if id != 'extends':
+ sel_dict = json.loads(val)
+ select = oscap.xccdf.select_new()
+ select.item = id
+ select.selected = sel_dict['selected']
+ if sel_dict['message']:
+ text = oscap.common.text_new()
+ text.text = str(sel_dict['message'])
+ select.add_remark(text)
+ prof.add_select(select)
+
+ benchmark.add_profile(prof)
return benchmark
--
1.7.2
13 years, 10 months
[PATCH] if the .cmd file doesn't exist in /test_name/command/ then bail out of the test before it runs
by Chris Smith
---
testing/harness/secstate_harness/testcase.py | 7 +++++--
1 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/testing/harness/secstate_harness/testcase.py b/testing/harness/secstate_harness/testcase.py
index 4cae8a9..2b77142 100644
--- a/testing/harness/secstate_harness/testcase.py
+++ b/testing/harness/secstate_harness/testcase.py
@@ -53,7 +53,10 @@ class Command:
self.command_path = os.path.join(commands_dir, '%s.cmd' % self.command_name)
if not os.path.exists(self.command_path):
- raise Exception('Error: Command Not Valid: %s.cmd does not exist in %s' % (self.command_name, self.command_path))
+ print 'Error: Command Not Valid: %s.cmd does not exist in %s' % (self.command_name, self.command_path)
+ sys.exit(0)
+ #if not os.path.exists(self.command_path):
+ #raise Exception('Error: Command Not Valid: %s.cmd does not exist in %s' % (self.command_name, self.command_path))
self.rc_path = os.path.join(commands_dir, '%s.rc' % self.command_name)
self.stdout_path = os.path.join(commands_dir, '%s.stdout' % self.command_name)
@@ -300,4 +303,4 @@ class TestCase:
sys.stdout.write(join(tab(['Verification Section Completed Successfully: %s\n' % str(cmds_ok)])))
return cmds_ok
-
\ No newline at end of file
+
--
1.7.0.1
13 years, 10 months
[PATCH] Implemented reverse search for OVAL to XCCDF
by Josh Adams
Search can now trace an OVAL definition to the XCCDF rule that uses it.
Fixes bug #7240
---
src/bin/secstate | 4 +++-
src/secstate/main.py | 22 ++++++++++++++++++----
src/secstate/util.py | 8 ++++++++
3 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/src/bin/secstate b/src/bin/secstate
index e4bc7f0..5e0591a 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -219,8 +219,10 @@ def search(arguments):
parser = OptionParser(usage="secstate search [options] <string>")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help="Show extra information from the search")
+ parser.add_option('-r', '--reverse', action='store_true', dest='reverse', default=False,
+ help="Search for an XCCDF rule based on an OVAL definition id")
(options, args) = parser.parse_args(arguments)
- if not sec_instance.search(args[0], options.verbose):
+ if not sec_instance.search(args[0], options.verbose, options.reverse):
return -1
def list_content(arguments):
diff --git a/src/secstate/main.py b/src/secstate/main.py
index d38e96e..0c2870f 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -588,7 +588,7 @@ class Secstate:
return True
- def search(self, search_string, verbose=False):
+ def search(self, search_string, verbose=False, reverse=False):
"""
Function: Searches though all imported benchmarks for a string of text
Input: A benchmark id and a string to search for
@@ -596,13 +596,27 @@ class Secstate:
Side Effects: Prints out the results of the search
"""
for key in self.content:
- benchmark = oscap.xccdf.benchmark_import(os.path.join(self.benchmark_dir, key, self.content[key]))
+ (benchmark, def_model) = self.import_content(key)
if benchmark == None:
- self.log.error("Error importing benchmark: %(key)s" % {'key':key})
- return False
+ if def_model == None:
+ self.log.error("Error importing content: %(key)s" % {'key':key})
+ return False
+ else:
+ continue
print "In benchmark %(bench)s:" % {'bench':key}
+ if reverse:
+ defn = def_model.get_definition(search_string)
+ if defn == None:
+ continue
+ else:
+ rule_defs = rules_to_defs(benchmark)
+ for k, v in rule_defs.iteritems():
+ if search_string in v:
+ print "OVAL Definition %(id)s is used by %(rule_id)s" % {'id':search_string, 'rule_id':k}
+ continue
+
for item in xccdf_get_itesm(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
title = None
description = None
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 214e782..93ce34a 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -315,6 +315,14 @@ def xccdf_rule_get_defs(rule):
return defs
+def rules_to_defs(benchmark):
+ res_defs = {}
+ rules = xccdf_get_items(benchmark, oscap.xccdf.XCCDF_RULE)
+ for rule in rules:
+ res_defs[rule.id] = xccdf_rule_get_defs(rule)
+
+ return res_defs
+
def apply_changes(benchmark, conf):
config = ConfigParser.ConfigParser()
config.optionxform = str
--
1.7.2
13 years, 10 months
[PATCH] Fixed segfault and removed cProfile
by Josh Adams
Forgot to remove the import call to cProfile
---
src/bin/secstate | 2 --
src/secstate/main.py | 1 -
2 files changed, 0 insertions(+), 3 deletions(-)
diff --git a/src/bin/secstate b/src/bin/secstate
index e4bc7f0..88dd58a 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -116,8 +116,6 @@ def import_content(arguments):
(benchmark, def_model) = sec_instance.import_content(arg, options.cpe, options.puppet, save=True)
if (benchmark == None) and (def_model == None):
return -1
- oscap.oval.definition_model_free(def_model)
- oscap.xccdf.benchmark_free(benchmark)
def export(arguments):
parser = OptionParser(usage="secstate export [options] <benchmark> <file>")
diff --git a/src/secstate/main.py b/src/secstate/main.py
index d38e96e..c1abe83 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -31,7 +31,6 @@ import subprocess
import time
import mimetypes
import json
-import cProfile
import openscap_api as oscap
from secstate.util import *
--
1.7.2
13 years, 10 months
[PATCH] Fixes to test_harness
by mkeeler@tresys.com
From: Matt Keeler <mkeeler(a)tresys.com>
Check against output files not just rc,stdout and stderr.
Some fixes to test files for what the actual output should be.
---
testing/harness/secstate_harness/testcase.py | 82 ++++++++++++++------
.../tests/import_xccdf_tgz/commands/import.stdout | 5 +-
.../output/var/lib/secstate/configs/PassComp.cfg | 2 +-
testing/tests/import_xccdf_tgz/test.manifest | 1 -
4 files changed, 61 insertions(+), 29 deletions(-)
diff --git a/testing/harness/secstate_harness/testcase.py b/testing/harness/secstate_harness/testcase.py
index 4cae8a9..f994622 100644
--- a/testing/harness/secstate_harness/testcase.py
+++ b/testing/harness/secstate_harness/testcase.py
@@ -31,7 +31,7 @@ def join(lst):
return ''.join(lst)
class Command:
- results_template = 'Command %(command_name)s Results:\n%(rc_results)s%(stdout_results)s%(stderr_results)s\n\tCommand Completed Successfully: %(success)s'
+ results_template = 'Command %(command_name)s Results:\n%(rc_results)s%(stdout_results)s%(stderr_results)sCommand Completed Successfully: %(success)s\n'
rc_results = 'Expected RC : %(expected)s, Actual RC : %(actual)s'
stdout_results = 'Expected STDOUT :\n%(expected)s\nActual STDOUT :\n%(actual)s\n'
stderr_results = 'Expected STDERR :\n%(expected)s\nActual STDERR :\n%(actual)s\n'
@@ -72,20 +72,24 @@ class Command:
def cache_rc(self):
rc_file = open(self.rc_path, 'r')
- self.expected_rc = rc_file.readline().strip('\n')
+ self.expected_rc = int(rc_file.readline().strip('\n'))
rc_file.close()
def cache_stdout(self):
if self.stdout_exists:
stdout = open(self.stdout_path)
- self.expected_stdout = stdout.readlines()
+ self.expected_stdout = [line.rstrip('\n') for line in stdout.readlines()]
stdout.close()
+ if not self.expected_stdout:
+ self.expected_stdout = ['']
def cache_stderr(self):
if self.stderr_exists:
stderr = open(self.stderr_path)
- self.expected_stderr = stderr.readlines()
+ self.expected_stderr = [line.rstrip('\n') for line in stderr.readlines()]
stderr.close()
+ if not self.expected_stderr:
+ self.expected_stderr = ['']
def execute(self, chroot):
if not self.cmd_args:
@@ -116,12 +120,12 @@ class Command:
return False
elif not self.expected_stdout:
self.cache_stdout()
-
- return self.stdout == self.expected_stdout
+
+ return self.stdout != self.expected_stdout
def stdout_differ_string(self):
if self.does_stdout_differ():
- return self.stdout_results % {'expected' : join(tab(self.expected_stdout)), 'actual' : join(tab(self.stdout))}
+ return self.stdout_results % {'expected' : '\n'.join(tab(self.expected_stdout)), 'actual' : '\n'.join(tab(self.stdout))}
else:
return ''
@@ -132,12 +136,11 @@ class Command:
return False
elif not self.expected_stderr:
self.cache_stderr()
- return self.stderr == self.expected_stderr
+ return self.stderr != self.expected_stderr
-
def stderr_differ_string(self):
if self.does_stderr_differ():
- return self.stderr_results % { 'expected' : tab(self.expected_stderr), 'actual' : tab(self.stderr)}
+ return self.stderr_results % { 'expected' : '\n'.join(tab(self.expected_stderr)), 'actual' : '\n'.join(tab(self.stderr))}
else:
return ''
@@ -148,7 +151,7 @@ class Command:
return False
elif not self.expected_rc:
self.cache_rc()
- return self.rc == self.expected_rc
+ return self.rc != self.expected_rc
def rc_differ_string(self):
if self.does_rc_differ():
@@ -158,9 +161,9 @@ class Command:
def get_results_string(self):
rep = { 'command_name' : self.command_name,
- 'rc_results' : self.rc_differ_string() + '\n' if self.rc_differ_string() else '',
- 'stdout_results' : join(tab(self.stdout_differ_string().split('\n'))) + '\n' if self.rc_differ_string() else '',
- 'stderr_results' : '%s\n' % join(tab(self.stderr_differ_string().split('\n'))) + '\n' if self.rc_differ_string() else '',
+ 'rc_results' : (join(tab([self.rc_differ_string()])) + '\n') if self.does_rc_differ() else join(tab(['Expected RC matches Actual RC\n'])),
+ 'stdout_results' : ('\n'.join(tab(self.stdout_differ_string().split('\n'))) + '\n') if self.does_stdout_differ() else join(tab(['Expect STDOUT matches Actual STDOUT\n'])),
+ 'stderr_results' : ('\n'.join(tab(self.stderr_differ_string().split('\n'))) + '\n') if self.does_stderr_differ() else join(tab(['Expect STDERR matches Actual STDERR\n'])),
'success' : not (self.does_rc_differ() or self.does_stdout_differ() or self.does_stderr_differ())}
return self.results_template % rep
@@ -233,7 +236,34 @@ class TestCase:
shutil.copy2(fpath, os.path.join(chroot_dir, basename))
def check_output_files(self):
- pass
+ rc = True
+ expected = 'Expected File Contents:\n%s'
+ actual = 'Actual File Contents:\n%s'
+ fdiffer = 'Output File %(oname)s differs:\n%(expected)s\n%(actual)s\n'
+ output_path = os.path.join(self.files_dir, 'output')
+ for root,dirs,files in os.walk(output_path):
+ for file in files:
+ opath = os.path.join(root, file)
+ dirname = re.sub(r'^%s(.*)' % re.escape(output_path), r'\1', root).lstrip(os.path.sep)
+ cpath = os.path.join(self.chroot, dirname, file)
+
+ ofile = open(opath)
+ otext = ofile.readlines()
+ ofile.close()
+
+ cfile = open(cpath)
+ ctext = cfile.readlines()
+ cfile.close()
+
+ if otext != ctext:
+ rc = False
+ etext = '\n'.join(tab((expected % join(tab(otext))).split('\n')))
+ atext = '\n'.join(tab((actual % join(tab(ctext))).split('\n')))
+ dtext = fdiffer % {'oname' : os.path.join(dirname,file), 'expected' : etext, 'actual' : atext}
+ sys.stdout.write('\n'.join(tab(dtext.rstrip('\n').split('\n'))) + '\n')
+ if rc:
+ sys.stdout.write('\n'.join(tab(['All Output Files matched expected output'])) + '\n\n')
+ return rc
def run_test(self):
sys.stdout.write('Test %s:\n' % self.test_name)
@@ -243,9 +273,11 @@ class TestCase:
if rc:
rc = self.__run_verification()
if rc:
- sys.stdout.write('Test %s Completed Successfully: True\n' % self.test_name)
- return True
- sys.stdout.write('Test %s Completed Successfully: False\n' % self.tests_name)
+ rc = self.check_output_files()
+ if rc:
+ sys.stdout.write('Test %s Completed Successfully: True\n' % self.test_name)
+ return True
+ sys.stdout.write('Test %s Completed Successfully: False\n' % self.test_name)
return False
def __run_requires(self):
@@ -259,12 +291,12 @@ class TestCase:
if cmd.does_rc_differ() or cmd.does_stdout_differ() or cmd.does_stderr_differ():
cmds_ok = False
- sys.stdout.write('%s\n\n' % '\n'.join(tab(cmd.get_results_string().split('\n'), num_tabs=2)))
+ sys.stdout.write('%s\n' % '\n'.join(tab(cmd.get_results_string().split('\n'), num_tabs=2)))
if not cmds_ok:
break
- sys.stdout.write(join(tab(['Requires Section Completed Successfully: %s\n' % str(cmds_ok)])))
+ sys.stdout.write(join(tab(['Requires Section Completed Successfully: %s\n\n' % str(cmds_ok)])))
return cmds_ok
def __run_tests(self):
@@ -275,12 +307,12 @@ class TestCase:
if cmd.does_rc_differ() or cmd.does_stdout_differ() or cmd.does_stderr_differ():
cmds_ok = False
- sys.stdout.write('%s\n\n' % '\n'.join(tab(cmd.get_results_string().split('\n'), num_tabs=2)))
+ sys.stdout.write('%s\n' % '\n'.join(tab(cmd.get_results_string().split('\n'), num_tabs=2)))
if not cmds_ok:
break
- sys.stdout.write(join(tab(['Tests Section Completed Successfully: %s\n' % str(cmds_ok)])))
+ sys.stdout.write(join(tab(['Tests Section Completed Successfully: %s\n\n' % str(cmds_ok)])))
return cmds_ok
def __run_verification(self):
@@ -293,11 +325,11 @@ class TestCase:
if cmd.does_rc_differ() or cmd.does_stdout_differ() or cmd.does_stderr_differ():
cmds_ok = False
- sys.stdout.write('%s\n\n' % '\n'.join(tab(cmd.get_results_string().split('\n'), num_tabs=2)))
+ sys.stdout.write('%s\n' % '\n'.join(tab(cmd.get_results_string().split('\n'), num_tabs=2)))
if not cmds_ok:
break
- sys.stdout.write(join(tab(['Verification Section Completed Successfully: %s\n' % str(cmds_ok)])))
+ sys.stdout.write(join(tab(['Verification Section Completed Successfully: %s\n\n' % str(cmds_ok)])))
return cmds_ok
-
\ No newline at end of file
+
diff --git a/testing/tests/import_xccdf_tgz/commands/import.stdout b/testing/tests/import_xccdf_tgz/commands/import.stdout
index 177b335..8151aeb 100644
--- a/testing/tests/import_xccdf_tgz/commands/import.stdout
+++ b/testing/tests/import_xccdf_tgz/commands/import.stdout
@@ -1,6 +1,7 @@
2-19PasswordComplexity_Lowercase.xml
-2-22PasswordComplexity_Special.xml
2-20PasswordComplexity_MinLen.xml
2-21PasswordComplexity_Numeric.xml
-PassComp.xccdf.xml
2-23PasswordComplexity_Uppercase.xml
+2-22PasswordComplexity_Special.xml
+PassComp.xccdf.xml
+
diff --git a/testing/tests/import_xccdf_tgz/files/output/var/lib/secstate/configs/PassComp.cfg b/testing/tests/import_xccdf_tgz/files/output/var/lib/secstate/configs/PassComp.cfg
index 46852ed..e589a45 100644
--- a/testing/tests/import_xccdf_tgz/files/output/var/lib/secstate/configs/PassComp.cfg
+++ b/testing/tests/import_xccdf_tgz/files/output/var/lib/secstate/configs/PassComp.cfg
@@ -1,4 +1,4 @@
[PassComp]
selected = True
-file = PassComp.xccdf.xml
+file = /var/lib/secstate/benchmarks/PassComp/PassComp.xccdf.xml
diff --git a/testing/tests/import_xccdf_tgz/test.manifest b/testing/tests/import_xccdf_tgz/test.manifest
index e6ed303..566cd6f 100644
--- a/testing/tests/import_xccdf_tgz/test.manifest
+++ b/testing/tests/import_xccdf_tgz/test.manifest
@@ -1,5 +1,4 @@
[requires]
-usage
[test]
import
--
1.6.5.2
13 years, 10 months