cas | 87 ++++++++++++++++++++++++++++----------------------------
cas-admin | 38 ++++++++++++++----------
cas.conf | 4 +-
lib/cas/core.py | 2 +
lib/cas/util.py | 61 +++++++++++++++++++++------------------
version | 2 -
6 files changed, 107 insertions(+), 87 deletions(-)
New commits:
commit 1682f17099ae9920c5f375cc94125e783d8eb89e
Author: Adam Stokes <adam(a)conans.battleaxe>
Date: Mon Dec 15 13:39:11 2008 -0500
- some issues arose when running cas outside of the processed directory;
  hopefully fixed. However, note that if you use anything less than
  python 2.6, shutil.move does a copy rather than just a rename, which
  can add to the wait time :(
- more work done on the logger; so far it seems easy to implement in the code.
  I just need to carry the object over to my different classes.
diff --git a/cas b/cas
index 307bc04..7674104 100755
--- a/cas
+++ b/cas
@@ -25,7 +25,7 @@ from subprocess import Popen, PIPE
from cas.network import Download
from cas.core import CoreBase, CoreException
-from cas.util import UtilBase, dprint, cprint
+from cas.util import UtilBase, Logging
from cas.rpmutils import RPMBase
if sys.version_info[:2] < (2,4):
@@ -36,64 +36,69 @@ config = ConfigParser.ConfigParser()
config.read("/etc/cas.conf")
WORKDIRECTORY = config.get("settings","workDirectory")
RPMS = config.get("settings","rpms")
-DPRINT = config.get("settings","dprint")
+DEBUGLEVEL = config.get("settings","debugLevel")
SERVERS = config.get("settings", "servers")
SMTPHOST = config.get("settings", "mailServer")
class CoreHandler(object):
- def __init__(self, filename, dst, is_url):
- self.filename = filename # abspath of file or url
- self.is_url = is_url
+ def __init__(self, filename, dst, logger):
+ self.filename = filename
self.dst = dst
+ self.casLog = logger
self.tool = CoreBase()
def run(self):
- if self.is_url:
- # return abspath of location of downloaded corefile
+ if(self.filename.startswith("http") or
self.filename.startswith("ftp")):
+ # filename is a url, process it with our download module
+ # this should return the the abspath to our processed directory
+ # and downloaded file
self.filename = Download(self.filename, self.dst).get()
if not os.path.isfile(self.filename):
- dprint("Unable to find file %s" % (self.filename,), DPRINT)
+ # not a url in this case so tests for local existence
+ # TODO: add support to check remote hosts
+ self.casLog.debug("Unable to find file %s" % (self.filename,))
sys.exit(1)
if self.tool.isCorefile(self.filename):
# No need to proceed to extracting corefile since we assume
# this is already at the proper stage.
shutil.move(self.filename,
os.path.join(self.dst, self.filename))
- return os.path.join(self.dst, self.filename)
+ self.filename = os.path.join(self.dst, self.filename)
+ return self.filename
try:
- cprint("Detected compressed archive, extracting.")
+ self.casLog.info("Detected a compressed core, extracting")
corepath = self.tool.extractCore(self.filename, self.dst)
# corefile extracted now move it to work directory, pull basename
# from corepath since we auto-detect the core file from extraction
shutil.move(corepath,os.path.join(self.dst, os.path.basename(corepath)))
- return os.path.join(self.dst, os.path.basename(corepath))
+ self.filename = os.path.join(self.dst, os.path.basename(corepath))
+ return self.filename
except CoreException, err:
- dprint(err, DPRINT)
+ self.casLog.debug(err)
sys.exit(1)
class TimestampHandler(object):
- def __init__(self, corefile):
+ def __init__(self, corefile, logger):
self.corefile = corefile
+ self.casLog = logger
self.util = UtilBase()
self.tool = CoreBase()
def run(self):
# dig through the buildstamp database and attempt to match it with the
# one found in the core
- cprint("Running timestamp on %s" % (self.corefile,))
rpmDB = self.util.load(RPMS)
try:
coreTimestamp = self.tool.timestamp(self.corefile)
except CoreException, err:
- dprint(err, DPRINT)
+ self.casLog.debug(err)
sys.exit(1)
for k,v in rpmDB.iteritems():
for coreObj in rpmDB[k]:
debugKernel, timestamp = coreObj
if timestamp and coreTimestamp in timestamp:
- cprint("Timestamp found %s" % (coreTimestamp,))
return (k, debugKernel)
- dprint("Unable to match (%s,%s) with debugKernel" % (self.corefile,
coreTimestamp), DPRINT)
+ self.casLog.debug("Unable to match timestamp %s/%s" %
(timestamp,debugKernel))
sys.exit(1)
class CasApplication(object):
@@ -121,29 +126,25 @@ class CasApplication(object):
parser.error("A file object is missing.")
self.filename = self.opts.filename
- self.is_url = True
- # not a url, so we assume its a local file, pre-pend absolute path
- if (not self.filename.startswith("http") or not
self.filename.startswith("ftp")):
- self.filename = os.path.abspath(self.filename)
- self.is_url = False
self.identifier = self.opts.identifier
self.email = self.opts.email
-
self.storagePath = os.path.join(WORKDIRECTORY, self.identifier)
+
+ # build logger object to deal with logging per job and keep things
+ # clean and easy to debug
+ self.casLog = Logging(self.storagePath, self.identifier)
def run(self):
# setup directory structure
if not os.path.isdir(self.storagePath):
os.makedirs(self.storagePath)
- cprint("Starting job at %s on file %s" % (self.storagePath,
self.filename))
- # change into processed directory for completion
- # os.chdir(self.storagePath)
+ self.casLog.info("Starting job at %s" % (self.storagePath,))
# begin core extraction analysis
- corefile = CoreHandler(self.filename, self.storagePath, self.is_url).run()
- cprint("Corefile prepared, processing %s" % (corefile,))
- debuginfo, debugKernel = TimestampHandler(corefile).run()
+ corefile = CoreHandler(self.filename, self.storagePath, self.casLog).run()
+ self.casLog.info("Corefile prepared, processing %s" % (corefile,))
+ debuginfo, debugKernel = TimestampHandler(corefile, self.casLog).run()
filterString = "*/%s" % (debugKernel,)
- cprint("Extracting debug kernel with filter %s" % (filterString,))
+ self.casLog.info("Extracting debug kernel with filter %s" %
(filterString,))
self.rpmTool.extract(debuginfo, self.storagePath,
filter=filterString,
return_results=False)
@@ -165,49 +166,51 @@ class CasApplication(object):
# is configured we attempt to process the core at another machine.
try:
import func.overlord.client as fc
- cprint("Crash file built, locating suitable %s system for " \
+ self.casLog.info("Crash file built, locating suitable %s system for
" \
"processing" % (debugKernelArch,))
if os.path.isfile(SERVERS):
serverList = self.util.load(SERVERS)
if serverList.has_key(debugKernelArch):
# TODO: Randomize server selection
casProcessMachine = serverList[debugKernelArch][0]
- cprint("Machine %s found, processing crash output" %
(casProcessMachine,))
+ self.casLog.info("Machine %s found, processing crash
output" % (casProcessMachine,))
cmd = os.path.join(self.storagePath,"crash")
client = fc.Overlord(casProcessMachine)
client_dict = client.command.run(cmd)
# Only necessary for debugging why running of crash failed.
# (sts, out, err) = client_dict[client_dict.keys()[0]]
else:
- cprint("No servers available for arch and current system not
"\
+ self.casLog.info("No servers available for arch and current
system not "\
"suitable for processing, please run cas-admin -h
" \
"for more information")
else:
- cprint("No servers database found, please run cas-admin -h for
" \
+ self.casLog.info("No servers database found, please run
cas-admin -h for " \
"more information")
sys.exit(1)
# DONE: Possibly handle this exception more gracefully?
except ImportError:
- cprint("Current running machine is not suitable for processing this
core " \
+ self.casLog.info("Current running machine is not suitable for
processing this core " \
"and
http://fedorahosted.org/func is not installed/configured
properly.")
- cprint("Finishing job without processing the core, please find a
suitable %s "\
+ self.casLog.info("Finishing job without processing the core, please
find a suitable %s "\
"machine in order to view this core in crash." %
(debugKernelArch,))
else:
import platform
# Define current machine hostname, mainly used for email results.
casProcessMachine = platform.uname()[1]
# The machine is suitable for processing the core through crash.
- cprint("Current machine suitable for processing core, running
crash.")
+ self.casLog.info("Current machine suitable for processing core, running
crash.")
cmd = os.path.join(self.storagePath,"crash")
# DONE: capture any errors returned from crash when processing core.
cmdPipe = Popen([cmd], stdout=PIPE, stderr=PIPE)
cmdData = cmdPipe.communicate()
- sts, out, err = (cmdPipe.returncode, cmdData[0], cmdData[1])
- if err:
- dprint(err, DPRINT)
+ # pull status code to verify crash even ran to completeness
+ sts, out, err = (cmdPipe.returncode, cmdData[0].strip(),
+ cmdData[1].strip())
+ if sts:
+ self.casLog.debug("crash problem: err: %s, out: %s" % (err,
out))
crashOutFile = os.path.join(self.storagePath,"crash.out")
if os.path.isfile(crashOutFile):
- cprint("Crash output processed, sending email to %s" %
(self.email,))
+ self.casLog.info("Crash output processed, sending email to %s" %
(self.email,))
try:
# Compose email msg of results
msg = "Subject: CAS results for %s\r\n\n" % (self.identifier,)
@@ -229,7 +232,7 @@ class CasApplication(object):
mailServer.quit()
except smtplib.SMTPException, e:
dprint(e)
- cprint("Job on %s complete and located in %s." % (self.filename,
+ self.casLog.info("Job on %s complete and located in %s." %
(self.filename,
self.storagePath))
return
diff --git a/cas-admin b/cas-admin
index d441f2c..a852be0 100755
--- a/cas-admin
+++ b/cas-admin
@@ -21,7 +21,7 @@ import sys
import urlparse
from cas.core import CoreBase
-from cas.util import UtilBase, sprint, dprint, cprint
+from cas.util import UtilBase, Logging
from cas.rpmutils import RPMBase
from subprocess import Popen, PIPE
from shutil import rmtree
@@ -36,11 +36,12 @@ KERNELS = config.get("settings","kernels")
RPMS = config.get("settings","rpms")
RPMFILTER = config.get("settings","rpmFilter")
DEBUGS = config.get("settings","debugs")
-DPRINT = config.get("settings","dprint")
+DEBUGLEVEL = config.get("settings","debugLevel")
SERVERS = config.get("settings","servers")
class CasDatabaseHandler(object):
- def __init__(self):
+ def __init__(self, logger):
+ self.casLog = logger
self.util = UtilBase()
if os.path.isfile(RPMS):
self.rpmDB = self.util.load(RPMS)
@@ -59,9 +60,13 @@ class CasDatabaseHandler(object):
# Uses emacs regex -- see `man find`
cmd = ["find", "-L", KERNELS, "-iregex",
RPMFILTER]
pipe = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ # setup count for kernels found, mainly for console output
+ count = 0
for line in pipe.stdout:
rpms.append(line.strip())
- sprint("(found) %-100s" % (os.path.basename(line.strip()),))
+ self.casLog.status("(found) %-5d kernel(s)" % (count,))
+ count = count + 1
+ # reset count for further informational messaging
totalRpms = len(rpms)
count = 0
""" Build database out in the form of
@@ -74,7 +79,8 @@ class CasDatabaseHandler(object):
count = count + 1
dst = os.path.join(DEBUGS, str(count))
rpmTool = RPMBase()
- sprint("(extracting) [%d/%d] %-100s" % (count, totalRpms,
os.path.basename(x)))
+ self.casLog.status("(extracting) [%d/%d] %-50s" % (count,
totalRpms,
+ os.path.basename(x)))
results = rpmTool.extract(x, dst)
# Sort through extracted debug for each type
# e.g. hugemem, PAE, smp, largesmp
@@ -85,14 +91,15 @@ class CasDatabaseHandler(object):
timestamp = stamper.timestamp(debugKernel)
# Build tuple of each debug type
self.rpmDB[x].append((debugKernel, timestamp))
- sprint("(timestamp) %-100s" % (debugKernel,))
+ self.casLog.status("(timestamp) gathering timestamp ..")
self.util.save(self.rpmDB, RPMS)
# Cleanup extracted debugs
rmtree(dst)
return
class CasServerHandler(object):
- def __init__(self):
+ def __init__(self, logger):
+ self.casLog = logger
self.util = UtilBase()
def run(self):
@@ -123,16 +130,17 @@ class CasServerHandler(object):
serverList[arch] = [hostname]
hostname_count = hostname_count + 1
self.util.save(serverList, SERVERS)
- dprint("Server database built with %d server(s) added.\n" %
(hostname_count,))
+ self.casLog.debug("Server database built with %d server(s)
added.\n" % (hostname_count,))
except ImportError:
- dprint("Please install func (
http://fedorahosted.org/func) for " \
- "an automated machine arch population.\n", DPRINT)
+ self.casLog.debug("Please install func (
http://fedorahosted.org/func)
for " \
+ "an automated machine arch population.\n")
sys.exit(1)
return
class CasAdminApplication(object):
def __init__(self, args):
self.parse_options(args)
+ self.casLog = Logging("/var/log","cas-admin")
def parse_options(self, args):
parser = optparse.OptionParser(usage="casprint [opts] args")
@@ -156,13 +164,13 @@ class CasAdminApplication(object):
os.makedirs(DEBUGS)
if self.buildDB:
- cprint("Starting CAS DB instance.")
- dbHandler = CasDatabaseHandler().run()
+ self.casLog.info("Starting CAS DB instance.")
+ dbHandler = CasDatabaseHandler(self.casLog).run()
elif self.server_init:
- cprint("Building CAS Server DB instance.")
- serverHandler = CasServerHandler().run()
+ self.casLog.info("Building CAS Server DB instance.")
+ serverHandler = CasServerHandler(self.casLog).run()
else:
- cprint("Missing options, please run with --help.")
+ self.casLog.info("Missing options, please run with --help.")
sys.exit(1)
if __name__=="__main__":
diff --git a/cas.conf b/cas.conf
index 950b791..774b4f6 100644
--- a/cas.conf
+++ b/cas.conf
@@ -16,8 +16,8 @@ rpmFilter=.*kernel-debuginfo-[0-9].*\.rpm
# so you could essentially set this to /tmp if space permitted
debugs=/cores/debugs
-# debug print on? (True/False)
-dprint=True
+# debug level (DEBUG, INFO)
+debugLevel=DEBUG
# define work directory
workDirectory=/cores/processed
diff --git a/lib/cas/core.py b/lib/cas/core.py
index 937df39..75c1cb4 100644
--- a/lib/cas/core.py
+++ b/lib/cas/core.py
@@ -63,6 +63,8 @@ class CoreBase(object):
raise CoreException("Can not determine compression format.")
else:
format.append(self.filepath)
+ # TODO: figure out someway to print some status to the screen
+ # during extraction
p = Popen(format, stdout=PIPE, stderr=PIPE)
err = p.stderr.read()
out = p.stdout.read()
diff --git a/lib/cas/util.py b/lib/cas/util.py
index 8086993..59cb65f 100755
--- a/lib/cas/util.py
+++ b/lib/cas/util.py
@@ -21,34 +21,41 @@ import logging
from subprocess import Popen, PIPE, call
-# setup logging
-logFile = '/var/log/cas.log'
-log = logging.getLogger()
-ch = logging.StreamHandler()
-fh = logging.FileHandler(logFile)
-log.addHandler(ch)
-log.addHandler(fh)
-ch_fmt = logging.Formatter("%(message)s")
-fh_fmt = logging.Formatter("%(asctime)s %(process)d (%(levelname)s)\t:
%(message)s")
-ch.setFormatter(ch_fmt)
-fh.setFormatter(fh_fmt)
-
-def dprint(msg, debug=True):
- if debug:
- log.setLevel(logging.DEBUG)
- log.debug("[.cas.][debug] :: " +msg)
-
-def sprint(msg):
- """ function to print status messages
- """
- sys.stdout.write("[.cas.] :: " + msg + "\r")
- sys.stdout.flush()
+class Logging(object):
+ def __init__(self, dst, logger_id, debug_level='DEBUG'):
+ self.dst = dst
+ self.logger_id = logger_id
+ self.logfile = logger_id+".log"
+ self.logfile = os.path.join(self.dst, self.logfile)
+ self.log = logging.getLogger(self.logger_id)
+ ch = logging.StreamHandler()
+ # never fail if directory doesn't exist.
+ if not os.path.exists(os.path.dirname(self.logfile)):
+ os.makedirs(os.path.dirname(self.logfile))
+ fh = logging.FileHandler(self.logfile)
+ self.log.addHandler(ch)
+ self.log.addHandler(fh)
+ ch_fmt = logging.Formatter("%(message)s")
+ fh_fmt = logging.Formatter("%(asctime)s %(process)d (%(levelname)s)\t:
%(message)s")
+ ch.setFormatter(ch_fmt)
+ fh.setFormatter(fh_fmt)
+ logging_level = {'DEBUG':logging.DEBUG,
+ 'INFO':logging.INFO}
+ self.log.setLevel(logging_level[debug_level])
+
+ def debug(self, msg):
+ self.log.debug("[.cas.][debug] :: %s" % (msg,))
-def cprint(msg):
- """ function to print procedure
- """
- log.setLevel(logging.INFO)
- log.info("[.cas.] :: " +msg)
+ def status(self, msg):
+ """ function to print status messages
+ """
+ sys.stdout.write("[.cas.] :: " + msg + "\r")
+ sys.stdout.flush()
+
+ def info(self, msg):
+ """ function to print procedure
+ """
+ self.log.info("[.cas.] :: %s" % (msg,))
class UtilException(Exception): pass
diff --git a/version b/version
index 88e3999..50c8612 100644
--- a/version
+++ b/version
@@ -1 +1 @@
-0.13 97
+0.13 102