#!/bin/sh
#
# Startup script for the Apache Web Server
#
# chkconfig: - 85 15
# description: Apache is a World Wide Web server. It is used to serve \
# HTML files and CGI.
# processname: httpd
# pidfile: /var/run/httpd.pid
# config: /etc/httpd/conf/access.conf
# config: /etc/httpd/conf/httpd.conf
# config: /etc/httpd/conf/srm.conf
ulimit -HSn 32768
# Source function library.
. /usr/local/etc/rc.d/functions
# This will prevent initlog from swallowing up a pass-phrase prompt if
# mod_ssl needs a pass-phrase from the user.
INITLOG_ARGS=""
# Path to the apachectl script, server binary, and short-form for messages.
apachectl=/usr/sbin/apachectl
httpd=/usr/sbin/httpd
prog=httpd
RETVAL=0
PIDFILE=/var/run/httpd.pid
# check for 1.3 configuration
check13 () {
CONFFILE=/etc/httpd/conf/httpd.conf
GONE="(ServerType|BindAddress|Port|AddModule|ClearModuleList|"
GONE="${GONE}AgentLog|RefererLog|RefererIgnore|FancyIndexing|"
GONE="${GONE}AccessConfig|ResourceConfig)"
if grep -Eiq "^[[:space:]]*($GONE)" $CONFFILE; then
echo
echo 1>&2 " Apache 1.3 configuration directives found"
echo 1>&2 " please read @docdir@/migration.html"
failure "Apache 1.3 config directives test"
echo
exit 1
fi
}
# The semantics of these two functions differ from the way apachectl does
# things -- attempting to start while running is a failure, and shutdown
# when not running is also a failure. So we just do it the way init scripts
# are expected to behave here.
start() {
echo -n "Starting $prog: "
check13 || exit 1
daemon $httpd -k start -DSSL
RETVAL=$?
if [ $RETVAL = 0 ] && touch /var/spool/lock/httpd
then
echo -e "\t[ OK ]";
else
echo -e "\t[ FAILED ]";
fi
return $RETVAL
}
waitforexit() {
count=${2:-30}
while [ 0$count -gt 0 ]
do
PIDS=`ps -ax | grep -v grep | grep -c /usr/sbin/httpd` || break
echo Remaining processes: $PIDS
stop
sleep 2
count=`expr $count - 1`
done
if [ 0$count -eq 0 ];
then
echo Remaining processes: $PIDS
return 1
fi
return 0
}
stop() {
echo -n "Stopping $prog: "
killall $prog 2> /dev/null
RETVAL=$?
if [ $RETVAL = 0 ] && rm -f /var/spool/lock/httpd $PIDFILE
then
echo -e "\t[ OK ]";
else
echo -e "\t[ FAILED ]";
fi
}
reload() {
echo -n "Reloading $prog: "
check13 || exit 1
killall -HUP $prog
RETVAL=$?
if [ $RETVAL = 0 ]
then
echo -e "\t[ OK ]";
else
echo -e "\t[ FAILED ]";
fi
}
# See how we were called.
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status $httpd
RETVAL=$?
;;
restart)
stop
waitforexit "httpd" 20
start
;;
condrestart)
if [ -f $PIDFILE ] ; then
stop
start
fi
;;
reload)
reload
;;
help|configtest|graceful|fullstatus)
$apachectl $@
RETVAL=$?
;;
test)
echo `moduleargs`;
;;
*)
echo $"Usage: $prog {start|stop|restart|condrestart|reload|status|fullstatus|graceful|help|configtest}"
exit 1
esac
exit $RETVAL
#!/bin/bash
#
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2005, 2007, 2008 Zimbra, Inc.
#
# The contents of this file are subject to the Yahoo! Public License
# Version 1.0 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
BASE=/opt/zimbra
APACHECTL=$BASE/httpd/bin/apachectl
CONF=$BASE/conf/httpd.conf
PIDFILE=$BASE/log/httpd.pid
case "$1" in
start)
if [ -f $APACHECTL ]; then
echo -n "Starting apache..."
$APACHECTL -k $1 -f $CONF
status=$?
if [ $status = 0 ]; then
echo "done."
else
echo "failed."
fi
exit $status
fi
exit 0
;;
reload|graceful)
if [ -f $APACHECTL ]; then
echo -n "Reloading apache..."
$APACHECTL -k graceful -f $CONF
status=$?
if [ $status = 0 ]; then
echo "done."
else
echo "failed."
fi
exit $status
fi
exit 0
;;
restart)
$0 stop
sleep 1
$0 start
;;
stop)
if [ -f $PIDFILE ]; then
if [ -f $APACHECTL ]; then
echo -n "Stopping apache..."
$APACHECTL -k $1 -f $CONF
status=$?
if [ $status = 0 ]; then
echo "done."
else
echo "failed."
fi
exit $status
fi
fi
exit 0
;;
status)
if [ -f $PIDFILE ]; then
pid=$(cat $PIDFILE)
if [ x"$pid" = "x" ]; then
echo "apache is not running."
exit 1
fi
else
echo "apache is not running."
exit 1
fi
kill -0 $pid
if [ $? = 0 ]; then
echo "apache is running."
exit 0
else
echo "apache is not running."
exit 1
fi
;;
*)
echo "$0 start|stop|restart|reload|graceful|status"
exit 1
;;
esac
echo
is_uid=`/usr/bin/id | /usr/bin/awk -F\= {'print $2'} | /usr/bin/awk -F\( {'print $1'}`
if [ $is_uid -ne 0 ] ; then
echo "You are not logged on as root"
exit ;
fi
# DNS, name server, bind ;
case "$1" in
'dns'|'bind'|'named')
if [ -f "/usr/local/etc/named.pid" ] ; then
named_pid=`cat /usr/local/etc/named.pid`
kill -HUP $named_pid
echo "DNS restarted $named_pid"
/usr/bin/ps -ef | /usr/bin/fgrep -v grep | /usr/bin/fgrep -v restart | /usr/bin/fgrep name | /usr/bin/awk {'print $9'}
else
echo "You don't seem to be running the BIND nameserver"
echo "with a pid in /usr/local/etc/bind.conf"
fi
;;
'apache'|'http'|'httpd'|'web')
if [ -f "/usr/local/apache/logs/httpd.pid" ] ; then
httpd_pid=`cat /usr/local/apache/logs/httpd.pid`
else
httpd_pid="not in /usr/local/apache/logs/httpd.pid"
fi
if [ -f "/usr/local/apache/bin/apachectl" ] ; then
/usr/local/apache/bin/apachectl restart
echo "Apache restarting"
sleep 1 ;
/usr/bin/ps -ef | /usr/bin/fgrep -v grep | /usr/bin/fgrep -v restart | /usr/bin/fgrep apach | /usr/bin/awk {'print $8'}
echo "Parent process is $httpd_pid"
else
echo "You don't have the apachectl file in /usr/local/apache/bin"
fi
;;
'mail'|'sendmail')
sendmail_pid=0
if [ -f /etc/mail/sendmail.pid ]; then
sendmail_pid=`/usr/bin/head -1 /etc/mail/sendmail.pid`
kill -HUP $sendmail_pid
echo "Sendmail restarted $sendmail_pid"
/usr/bin/ps -ef | /usr/bin/fgrep -v grep | /usr/bin/fgrep -v restart | /usr/bin/fgrep mail | /usr/bin/awk {'print $8 " " $9 " " $10'}
echo $sendmail_pid
fi
if [ -f /var/run/sendmail.pid ]; then
sendmail_pid=`/usr/bin/head -1 /var/run/sendmail.pid`
kill -HUP $sendmail_pid
echo "Sendmail restarted $sendmail_pid"
/usr/bin/ps -ef | /usr/bin/fgrep -v grep | /usr/bin/fgrep -v restart | /usr/bin/fgrep mail | /usr/bin/awk {'print $8 " " $9 " " $10'}
echo $sendmail_pid
fi
if [ $sendmail_pid -eq 0 ]; then
echo "You don't have the sendmail parent process"
echo "number in /etc/mail/sendmail.pid"
echo "or in /var/run/sendmail.pid"
fi
;;
'syslog')
if [ -f /etc/syslog.pid ] ; then
syslog_pid=`cat /etc/syslog.pid`
kill -HUP $syslog_pid
echo "Syslog restarted $syslog_pid"
/usr/bin/ps -ef | /usr/bin/fgrep -v grep | /usr/bin/fgrep -v restart | /usr/bin/fgrep syslog | /usr/bin/awk {'print $9'}
else
echo "/etc/syslog.pid does not exist"
fi
;;
'inetd')
pkill -HUP inetd
echo "Inetd restarted $inetd_pid"
/usr/bin/ps -ef | /usr/bin/fgrep -v grep | /usr/bin/fgrep -v restart | /usr/bin/fgrep inetd | /usr/bin/awk {'print $8 " " $9 " " $10'}
;;
*)
echo "Syntax Error -> restart option"
echo "restart apache|bind|dns|http|httpd|inetd|mail|named|syslog|web"
echo
;;
esac
echo
exit
#!/bin/bash
#
# apache Start the apache HTTP server.
#
# The variables below are NOT to be changed. They are there to make the
# script more readable.
NAME=apache
DAEMON=/usr/sbin/$NAME
PIDFILE=/var/run/$NAME.pid
CONF=/etc/$NAME/httpd.conf
APACHECTL=/usr/sbin/${NAME}ctl
# note: SSD is required only at startup of the daemon.
SSD=`which start-stop-daemon`
ENV="env -i LANG=C PATH=/bin:/usr/bin:/usr/local/bin"
SHIB_HOME=/opt/shibboleth-1.3
LD_LIBRARY_PATH=${SHIB_HOME}/libexec:${SHIB_HOME}/lib
export LD_LIBRARY_PATH
trap "" 1
# Check that we're not being started by inetd
if egrep -q -i "^[[:space:]]*ServerType[[:space:]]+inet" $CONF
then
exit 0
fi
test_config() {
if [ ! -x $APACHECTL ]; then
echo "$APACHECTL is not executable, exiting"
exit 0
fi
# ensure we don't leak environment vars into apachectl
APACHECTL="$ENV $APACHECTL"
if ! $APACHECTL configtest 2> /dev/null
then
printf "Configuration syntax error detected. Not reloading.\n\n"
$APACHECTL configtest
exit 1
fi
}
should_start() {
if [ ! -x $DAEMON ]; then
echo "apache is not executable, not starting"
exit 0
fi
}
case "$1" in
start)
should_start
test_config
echo -n "Starting web server: $NAME"
$ENV $SSD --start --pidfile $PIDFILE --exec $DAEMON > /dev/null
;;
stop)
echo -n "Stopping web server: $NAME"
start-stop-daemon --stop --pidfile $PIDFILE --oknodo
rm -rf /var/lib/apache/mod-bandwidth/link/*
;;
reload | force-reload)
test_config
echo -n "Reloading $NAME configuration"
start-stop-daemon --stop --pidfile $PIDFILE --signal USR1
;;
reload-modules)
test_config
echo -n "Reloading $NAME modules"
start-stop-daemon --stop --pidfile $PIDFILE --oknodo --retry 30
should_start
$ENV $SSD --start --pidfile $PIDFILE --exec $DAEMON > /dev/null
;;
restart)
test_config
echo -n "Restarting $NAME"
if ! start-stop-daemon -q --stop --pidfile $PIDFILE --signal HUP; then
$ENV $SSD --start --pidfile $PIDFILE --exec $DAEMON > /dev/null
fi
;;
*)
echo "Usage: /etc/init.d/$NAME {start|stop|reload|reload-modules|force-reload|restart}"
exit 1
;;
esac
if [ $? -eq 0 ]; then
echo .
exit 0
else
echo " failed"
exit 1
fi
#!/bin/bash
KNOWN_PATH="/usr/sbin /sbin /opt/apache2/bin /usr/local/sbin"
HTTPD_DIR=
for path in $KNOWN_PATH; do
echo "trying $path/httpd..."
if ls "$path/httpd" > /dev/null 2>&1; then
HTTPD_DIR=$path
break
fi
done
if [ -z "$HTTPD_DIR" ]; then
echo "httpd path not found, please enter the path to directory where httpd is"
echo -n "httpd path : "
read HTTPD_DIR
fi
HTTPD="$HTTPD_DIR/httpd"
if [ ! -x $HTTPD ]; then
echo "httpd could not be found, exiting..."
exit 1
fi
$HTTPD -V
FILE SPACING:
# double space a file
sed G
# double space a file which already has blank lines in it. Output file
# should contain no more than one blank line between lines of text.
sed '/^$/d;G'
# triple space a file
sed 'G;G'
# undo double-spacing (assumes even-numbered lines are always blank)
sed 'n;d'
# insert a blank line above every line which matches "regex"
sed '/regex/{x;p;x;}'
# insert a blank line below every line which matches "regex"
sed '/regex/G'
# insert a blank line above and below every line which matches "regex"
sed '/regex/{x;p;x;G;}'
NUMBERING:
# number each line of a file (simple left alignment). Using a tab (see
# note on '\t' at end of file) instead of space will preserve margins.
sed = filename | sed 'N;s/\n/\t/'
# number each line of a file (number on left, right-aligned)
sed = filename | sed 'N; s/^/     /; s/ *\(.\{6,\}\)\n/\1  /'
# number each line of file, but only print numbers if line is not blank
sed '/./=' filename | sed '/./N; s/\n/ /'
# count lines (emulates "wc -l")
sed -n '$='
TEXT CONVERSION AND SUBSTITUTION:
# IN UNIX ENVIRONMENT: convert DOS newlines (CR/LF) to Unix format.
sed 's/.$//' # assumes that all lines end with CR/LF
sed 's/^M$//' # in bash/tcsh, press Ctrl-V then Ctrl-M
sed 's/\x0D$//' # works on ssed, gsed 3.02.80 or higher
# IN UNIX ENVIRONMENT: convert Unix newlines (LF) to DOS format.
sed "s/$/`echo -e r`/" # command line under ksh
sed 's/$'"/`echo r`/" # command line under bash
sed "s/$/`echo r`/" # command line under zsh
sed 's/$/r/' # gsed 3.02.80 or higher
# IN DOS ENVIRONMENT: convert Unix newlines (LF) to DOS format.
sed "s/$//" # method 1
sed -n p # method 2
# IN DOS ENVIRONMENT: convert DOS newlines (CR/LF) to Unix format.
# Can only be done with UnxUtils sed, version 4.0.7 or higher. The
# UnxUtils version can be identified by the custom "--text" switch
# which appears when you use the "--help" switch. Otherwise, changing
# DOS newlines to Unix newlines cannot be done with sed in a DOS
# environment. Use "tr" instead.
sed "s/r//" infile >outfile # UnxUtils sed v4.0.7 or higher
tr -d r <infile >outfile # GNU tr version 1.22 or higher
# delete leading whitespace (spaces, tabs) from front of each line
# aligns all text flush left
sed 's/^[ \t]*//' # see note on '\t' at end of file
# delete trailing whitespace (spaces, tabs) from end of each line
sed 's/[ \t]*$//' # see note on '\t' at end of file
# delete BOTH leading and trailing whitespace from each line
sed 's/^[ \t]*//;s/[ \t]*$//'
# insert 5 blank spaces at beginning of each line (make page offset)
sed 's/^/     /'
# align all text flush right on a 79-column width
sed -e :a -e 's/^.\{1,78\}$/ &/;ta' # set at 78 plus 1 space
# center all text in the middle of 79-column width. In method 1,
# spaces at the beginning of the line are significant, and trailing
# spaces are appended at the end of the line. In method 2, spaces at
# the beginning of the line are discarded in centering the line, and
# no trailing spaces appear at the end of lines.
sed -e :a -e 's/^.\{1,77\}$/ & /;ta' # method 1
sed -e :a -e 's/^.\{1,77\}$/ &/;ta' -e 's/\( *\)\1/\1/' # method 2
# substitute (find and replace) "foo" with "bar" on each line
sed 's/foo/bar/' # replaces only 1st instance in a line
sed 's/foo/bar/4' # replaces only 4th instance in a line
sed 's/foo/bar/g' # replaces ALL instances in a line
sed 's/\(.*\)foo\(.*foo\)/\1bar\2/' # replace the next-to-last case
sed 's/\(.*\)foo/\1bar/' # replace only the last case
# substitute "foo" with "bar" ONLY for lines which contain "baz"
sed '/baz/s/foo/bar/g'
# substitute "foo" with "bar" EXCEPT for lines which contain "baz"
sed '/baz/!s/foo/bar/g'
# change "scarlet" or "ruby" or "puce" to "red"
sed 's/scarlet/red/g;s/ruby/red/g;s/puce/red/g' # most seds
gsed 's/scarlet\|ruby\|puce/red/g' # GNU sed only
# reverse order of lines (emulates "tac")
# bug/feature in HHsed v1.5 causes blank lines to be deleted
sed '1!G;h;$!d' # method 1
sed -n '1!G;h;$p' # method 2
# reverse each character on the line (emulates "rev")
sed '/\n/!G;s/\(.\)\(.*\n\)/&\2\1/;//D;s/.//'
# join pairs of lines side-by-side (like "paste")
sed '$!N;s/\n/ /'
# if a line ends with a backslash, append the next line to it
sed -e :a -e '/\\$/N; s/\\\n//; ta'
# if a line begins with an equal sign, append it to the previous line
# and replace the "=" with a single space
sed -e :a -e '$!N;s/\n=/ /;ta' -e 'P;D'
# add commas to numeric strings, changing "1234567" to "1,234,567"
gsed ':a;s/\B[0-9]\{3\}\>/,&/;ta' # GNU sed
sed -e :a -e 's/\(.*[0-9]\)\([0-9]\{3\}\)/\1,\2/;ta' # other seds
# add commas to numbers with decimal points and minus signs (GNU sed)
gsed -r ':a;s/(^|[^0-9.])([0-9]+)([0-9]{3})/\1\2,\3/g;ta'
# add a blank line every 5 lines (after lines 5, 10, 15, 20, etc.)
gsed '0~5G' # GNU sed only
sed 'n;n;n;n;G;' # other seds
SELECTIVE PRINTING OF CERTAIN LINES:
# print first 10 lines of file (emulates behavior of "head")
sed 10q
# print first line of file (emulates "head -1")
sed q
# print the last 10 lines of a file (emulates "tail")
sed -e :a -e '$q;N;11,$D;ba'
# print the last 2 lines of a file (emulates "tail -2")
sed '$!N;$!D'
# print the last line of a file (emulates "tail -1")
sed '$!d' # method 1
sed -n '$p' # method 2
# print the next-to-the-last line of a file
sed -e '$!{h;d;}' -e x # for 1-line files, print blank line
sed -e '1{$q;}' -e '$!{h;d;}' -e x # for 1-line files, print the line
sed -e '1{$d;}' -e '$!{h;d;}' -e x # for 1-line files, print nothing
# print only lines which match regular expression (emulates "grep")
sed -n '/regexp/p' # method 1
sed '/regexp/!d' # method 2
# print only lines which do NOT match regexp (emulates "grep -v")
sed -n '/regexp/!p' # method 1, corresponds to above
sed '/regexp/d' # method 2, simpler syntax
# print the line immediately before a regexp, but not the line
# containing the regexp
sed -n '/regexp/{g;1!p;};h'
# print the line immediately after a regexp, but not the line
# containing the regexp
sed -n '/regexp/{n;p;}'
# print 1 line of context before and after regexp, with line number
# indicating where the regexp occurred (similar to "grep -A1 -B1")
sed -n -e '/regexp/{=;x;1!p;g;$!N;p;D;}' -e h
# grep for AAA and BBB and CCC (in any order)
sed '/AAA/!d; /BBB/!d; /CCC/!d'
# grep for AAA and BBB and CCC (in that order)
sed '/AAA.*BBB.*CCC/!d'
# grep for AAA or BBB or CCC (emulates "egrep")
sed -e '/AAA/b' -e '/BBB/b' -e '/CCC/b' -e d # most seds
gsed '/AAA\|BBB\|CCC/!d' # GNU sed only
# print paragraph if it contains AAA (blank lines separate paragraphs)
# HHsed v1.5 must insert a 'G;' after 'x;' in the next 3 scripts below
sed -e '/./{H;$!d;}' -e 'x;/AAA/!d;'
# print paragraph if it contains AAA and BBB and CCC (in any order)
sed -e '/./{H;$!d;}' -e 'x;/AAA/!d;/BBB/!d;/CCC/!d'
# print paragraph if it contains AAA or BBB or CCC
sed -e '/./{H;$!d;}' -e 'x;/AAA/b' -e '/BBB/b' -e '/CCC/b' -e d
gsed '/./{H;$!d;};x;/AAA\|BBB\|CCC/b;d' # GNU sed only
# print only lines of 65 characters or longer
sed -n '/^.\{65\}/p'
# print only lines of less than 65 characters
sed -n '/^.\{65\}/!p' # method 1, corresponds to above
sed '/^.\{65\}/d' # method 2, simpler syntax
# print section of file from regular expression to end of file
sed -n '/regexp/,$p'
# print section of file based on line numbers (lines 8-12, inclusive)
sed -n '8,12p' # method 1
sed '8,12!d' # method 2
# print line number 52
sed -n '52p' # method 1
sed '52!d' # method 2
sed '52q;d' # method 3, efficient on large files
# beginning at line 3, print every 7th line
gsed -n '3~7p' # GNU sed only
sed -n '3,${p;n;n;n;n;n;n;}' # other seds
# print section of file between two regular expressions (inclusive)
sed -n '/Iowa/,/Montana/p' # case sensitive
SELECTIVE DELETION OF CERTAIN LINES:
# print all of file EXCEPT section between 2 regular expressions
sed '/Iowa/,/Montana/d'
# delete duplicate, consecutive lines from a file (emulates "uniq").
# First line in a set of duplicate lines is kept, rest are deleted.
sed '$!N; /^\(.*\)\n\1$/!P; D'
# delete duplicate, nonconsecutive lines from a file. Beware not to
# overflow the buffer size of the hold space, or else use GNU sed.
sed -n 'G; s/\n/&&/; /^\([ -~]*\n\).*\n\1/d; s/\n//; h; P'
# delete all lines except duplicate lines (emulates "uniq -d").
sed '$!N; s/^\(.*\)\n\1$/\1/; t; D'
# delete the first 10 lines of a file
sed '1,10d'
# delete the last line of a file
sed '$d'
# delete the last 2 lines of a file
sed 'N;$!P;$!D;$d'
# delete the last 10 lines of a file
sed -e :a -e '$d;N;2,10ba' -e 'P;D' # method 1
sed -n -e :a -e '1,10!{P;N;D;};N;ba' # method 2
# delete every 8th line
gsed '0~8d' # GNU sed only
sed 'n;n;n;n;n;n;n;d;' # other seds
# delete lines matching pattern
sed '/pattern/d'
# delete ALL blank lines from a file (same as "grep '.' ")
sed '/^$/d' # method 1
sed '/./!d' # method 2
# delete all CONSECUTIVE blank lines from file except the first; also
# deletes all blank lines from top and end of file (emulates "cat -s")
sed '/./,/^$/!d' # method 1, allows 0 blanks at top, 1 at EOF
sed '/^$/N;/\n$/D' # method 2, allows 1 blank at top, 0 at EOF
# delete all CONSECUTIVE blank lines from file except the first 2:
sed '/^$/N;/\n$/N;//D'
# delete all leading blank lines at top of file
sed '/./,$!d'
# delete all trailing blank lines at end of file
sed -e :a -e '/^\n*$/{$d;N;ba' -e '}' # works on all seds
sed -e :a -e '/^\n*$/N;/\n$/ba' # ditto, except for gsed 3.02.*
# delete the last line of each paragraph
sed -n '/^$/{p;h;};/./{x;/./p;}'
SPECIAL APPLICATIONS:
# remove nroff overstrikes (char, backspace) from man pages. The 'echo'
# command may need an -e switch if you use Unix System V or bash shell.
sed "s/.`echo b`//g" # double quotes required for Unix environment
sed 's/.^H//g' # in bash/tcsh, press Ctrl-V and then Ctrl-H
sed 's/.x08//g' # hex expression for sed 1.5, GNU sed, ssed
# get Usenet/e-mail message header
sed '/^$/q' # deletes everything after first blank line
# get Usenet/e-mail message body
sed '1,/^$/d' # deletes everything up to first blank line
# get Subject header, but remove initial "Subject: " portion
sed '/^Subject: */!d; s///;q'
# get return address header
sed '/^Reply-To:/q; /^From:/h; /./d;g;q'
# parse out the address proper. Pulls out the e-mail address by itself
# from the 1-line return address header (see preceding script)
sed 's/ *(.*)//; s/>.*//; s/.*[:<] *//'
# add a leading angle bracket and space to each line (quote a message)
sed 's/^/> /'
# delete leading angle bracket & space from each line (unquote a message)
sed 's/^> //'
# remove most HTML tags (accommodates multiple-line tags)
sed -e :a -e 's/<[^>]*>//g;/</N;//ba'
# extract multi-part uuencoded binaries, removing extraneous header
# info, so that only the uuencoded portion remains. Files passed to
# sed must be passed in the proper order. Version 1 can be entered
# from the command line; version 2 can be made into an executable
# Unix shell script. (Modified from a script by Rahul Dhesi.)
sed '/^end/,/^begin/d' file1 file2 ... fileX | uudecode # vers. 1
sed '/^end/,/^begin/d' "$@" | uudecode # vers. 2
# sort paragraphs of file alphabetically. Paragraphs are separated by blank
# lines. GNU sed uses \v for vertical tab, or any unique char will do.
sed '/./{H;d;};x;s/\n/={NL}=/g' file | sort | sed '1s/={NL}=//;s/={NL}=/\n/g'
gsed '/./{H;d};x;y/\n/\v/' file | sort | sed '1s/\v//;y/\v/\n/'
# zip up each .TXT file individually, deleting the source file and
# setting the name of each .ZIP file to the basename of the .TXT file
# (under DOS: the "dir /b" switch returns bare filenames in all caps).
echo @echo off >zipup.bat
dir /b *.txt | sed "s/^\(.*\)\.TXT/pkzip -mo \1 \1.TXT/" >>zipup.bat
If your system abruptly loses power, or if a RAID card is beginning to fail, you might see an ominous message like this within your logs:
EXT3-fs error (device hda3) in start_transaction: Journal has aborted
Basically, the system is telling you that it’s detected a filesystem/journal mismatch, and it can’t utilize the journal any longer. When this situation pops up, the filesystem gets mounted read-only almost immediately. To fix the situation, you can remount the partition as ext2 (if it isn’t your active root partition), or you can commence the repair operations.
If you’re working with an active root partition, you will need to boot into some rescue media and perform these operations there. If this error occurs with an additional partition besides the root partition, simply unmount the broken filesystem and proceed with these operations.
Remove the journal from the filesystem (effectively turning it into ext2):
# tune2fs -O ^has_journal /dev/hda3
Now, you will need to fsck it to correct any possible problems (throw in a -y flag to say yes to all repairs, -C for a progress bar):
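A sketch of that step, assuming /dev/hda3 as in the example above and using e2fsck (the ext2/ext3 checker):
# e2fsck -fy -C0 /dev/hda3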
Once that’s finished, make a new journal which effectively makes the partition an ext3 filesystem again:
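For example, again assuming /dev/hda3 (tune2fs -j adds a journal to the filesystem):
# tune2fs -j /dev/hda3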
You should be able to mount the partition as an ext3 partition at this time:
# mount -t ext3 /dev/hda3 /mnt/fixed
Be sure to check your dmesg output for any additional errors after you’re finished!
Today I was asked by a colleague how to match a regex in a text file and return a specific number of lines both before and after the match. GNU grep has a very easy solution for this: the -A/-B/-C flags, as shown in the help output on my Mac:
warwick egg$ grep --help
Usage: grep [OPTION]... PATTERN [FILE] ...
Search for PATTERN in each FILE or standard input.
Example: grep -i 'hello world' menu.h main.c
Regexp selection and interpretation:
-E, --extended-regexp PATTERN is an extended regular expression
-F, --fixed-strings PATTERN is a set of newline-separated strings
-G, --basic-regexp PATTERN is a basic regular expression
-P, --perl-regexp PATTERN is a Perl regular expression
-e, --regexp=PATTERN use PATTERN as a regular expression
-f, --file=FILE obtain PATTERN from FILE
-i, --ignore-case ignore case distinctions
-w, --word-regexp force PATTERN to match only whole words
-x, --line-regexp force PATTERN to match only whole lines
-z, --null-data a data line ends in 0 byte, not newline
Miscellaneous:
-s, --no-messages suppress error messages
-v, --invert-match select non-matching lines
-V, --version print version information and exit
--help display this help and exit
--mmap use memory-mapped input if possible
Output control:
-m, --max-count=NUM stop after NUM matches
-b, --byte-offset print the byte offset with output lines
-n, --line-number print line number with output lines
--line-buffered flush output on every line
-H, --with-filename print the filename for each match
-h, --no-filename suppress the prefixing filename on output
--label=LABEL print LABEL as filename for standard input
-o, --only-matching show only the part of a line matching PATTERN
-q, --quiet, --silent suppress all normal output
--binary-files=TYPE assume that binary files are TYPE
TYPE is 'binary', 'text', or 'without-match'
-a, --text equivalent to --binary-files=text
-I equivalent to --binary-files=without-match
-d, --directories=ACTION how to handle directories
ACTION is 'read', 'recurse', or 'skip'
-D, --devices=ACTION how to handle devices, FIFOs and sockets
ACTION is 'read' or 'skip'
-R, -r, --recursive equivalent to --directories=recurse
--include=PATTERN files that match PATTERN will be examined
--exclude=PATTERN files that match PATTERN will be skipped.
--exclude-from=FILE files that match PATTERN in FILE will be skipped.
-L, --files-without-match only print FILE names containing no match
-l, --files-with-matches only print FILE names containing matches
-c, --count only print a count of matching lines per FILE
-Z, --null print 0 byte after FILE name
Context control:
-B, --before-context=NUM print NUM lines of leading context
-A, --after-context=NUM print NUM lines of trailing context
-C, --context=NUM print NUM lines of output context
-NUM same as --context=NUM
--color[=WHEN],
--colour[=WHEN] use markers to distinguish the matching string
WHEN may be `always', `never' or `auto'.
-U, --binary do not strip CR characters at EOL (MSDOS)
-u, --unix-byte-offsets report offsets as if CRs were not there (MSDOS)
`egrep' means `grep -E'. `fgrep' means `grep -F'.
With no FILE, or when FILE is -, read standard input. If less than
two FILEs given, assume -h. Exit status is 0 if match, 1 if no match,
and 2 if trouble.
Report bugs to <bug-gnu-utils@gnu.org>.
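So, for example, printing two lines of context either side of each match is as simple as the following (an illustrative sketch with a made-up pattern and file name):
grep -C2 'pattern' logfile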
Unfortunately, Solaris grep, egrep or fgrep are not quite as advanced:
Usage: grep -hblcnsviw pattern file . . .
usage: egrep [ -bchilnsv ] [ -e exp ] [ -f file ] [ strings ] [ file ] ...
usage: fgrep [ -bchilnsvx ] [ -e exp ] [ -f file ] [ strings ] [ file ] ...
There is a script written in Perl called wgrep which is freely available from the internet somewhere (ask Google). wgrep does quite a good job of this with its -wB:A syntax, where B is the number of lines before the match and A is the number of lines after the match:
Usage: wgrep [-n] [-w[B][:A] | -W] [-d] [-p] [-s] [-m] regexp file(s)
-n = number lines
-s = mark matched lines with asterisks
-wB:A = display B lines before and A lines after
each matched line [both default to 3]
-W = suppress window; equivalent to -w0:0
-d = suppress separation lines between sections
-m = suppress file name header lines
-p = plain mode: equivalent to -W -d
-h = print this help message and exit
Note: If present, -h prevails; otherwise, the rightmost
option wins in the case of contradictions.
-bash-3.00$ ./wgrep -w4:3 -n ^fish$ /usr/dict/words
********** /usr/dict/words **********
8756 firsthand
8757 fiscal
8758 Fischbein
8759 Fischer
8760 fish
8761 fisherman
8762 fishermen
8763 fishery
However, if you know where to look, and you had the foresight to include them at install time, Solaris 10 ships quite a nice collection of freeware tools under /usr/sfw, and if you look hard enough (not that hard, actually) you can find GNU grep:
-bash-3.00$ /usr/sfw/bin/ggrep --help
Usage: ggrep [OPTION]... PATTERN [FILE] ...
Search for PATTERN in each FILE or standard input.
Example: ggrep -i 'hello world' menu.h main.c
Regexp selection and interpretation:
-E, --extended-regexp PATTERN is an extended regular expression
-F, --fixed-strings PATTERN is a set of newline-separated strings
-G, --basic-regexp PATTERN is a basic regular expression
-P, --perl-regexp PATTERN is a Perl regular expression
-e, --regexp=PATTERN use PATTERN as a regular expression
-f, --file=FILE obtain PATTERN from FILE
-i, --ignore-case ignore case distinctions
-w, --word-regexp force PATTERN to match only whole words
-x, --line-regexp force PATTERN to match only whole lines
-z, --null-data a data line ends in 0 byte, not newline
Miscellaneous:
-s, --no-messages suppress error messages
-v, --invert-match select non-matching lines
-V, --version print version information and exit
--help display this help and exit
--mmap use memory-mapped input if possible
Output control:
-m, --max-count=NUM stop after NUM matches
-b, --byte-offset print the byte offset with output lines
-n, --line-number print line number with output lines
--line-buffered flush output on every line
-H, --with-filename print the filename for each match
-h, --no-filename suppress the prefixing filename on output
--label=LABEL print LABEL as filename for standard input
-o, --only-matching show only the part of a line matching PATTERN
-q, --quiet, --silent suppress all normal output
--binary-files=TYPE assume that binary files are TYPE
TYPE is 'binary', 'text', or 'without-match'
-a, --text equivalent to --binary-files=text
-I equivalent to --binary-files=without-match
-d, --directories=ACTION how to handle directories
ACTION is 'read', 'recurse', or 'skip'
-D, --devices=ACTION how to handle devices, FIFOs and sockets
ACTION is 'read' or 'skip'
-R, -r, --recursive equivalent to --directories=recurse
--include=PATTERN files that match PATTERN will be examined
--exclude=PATTERN files that match PATTERN will be skipped.
--exclude-from=FILE files that match PATTERN in FILE will be skipped.
-L, --files-without-match only print FILE names containing no match
-l, --files-with-matches only print FILE names containing matches
-c, --count only print a count of matching lines per FILE
-Z, --null print 0 byte after FILE name
Context control:
-B, --before-context=NUM print NUM lines of leading context
-A, --after-context=NUM print NUM lines of trailing context
-C, --context=NUM print NUM lines of output context
-NUM same as --context=NUM
--color[=WHEN],
--colour[=WHEN] use markers to distinguish the matching string
WHEN may be `always', `never' or `auto'.
-U, --binary do not strip CR characters at EOL (MSDOS)
-u, --unix-byte-offsets report offsets as if CRs were not there (MSDOS)
`egrep' means `grep -E'. `fgrep' means `grep -F'.
With no FILE, or when FILE is -, read standard input. If less than
two FILEs given, assume -h. Exit status is 0 if match, 1 if no match,
and 2 if trouble.
Report bugs to <bug-gnu-utils@gnu.org>.
-bash-3.00$ /usr/sfw/bin/ggrep -B4 -A4 ^fish$ /usr/dict/words
firsthand
fiscal
Fischbein
Fischer
fish
fisherman
fishermen
fishery
fishmonger
Next time I promise to write about something less dull, but this nugget of useful information may really save you some time.
Paul.
This entry covers a recent discovery I made that has helped partly automate our process of SSL CSR generation. I’m sure many of you, if you support SSL-based websites, are familiar with the openssl process of generating a CSR to submit to Verisign, Comodo or another CA in order to receive a shiny new certificate for installation under apache or similar.
Until recently this has been, for me, a manual process, looking something like the following under openssl:
1. Generate a key;
# /usr/sfw/bin/openssl
OpenSSL> genrsa -out my.website.com.key 2048
Generating RSA private key, 2048 bit long modulus
...................................+++
..........+++
e is 65537 (0x10001)
2. Generate the CSR:
OpenSSL> req -new -key my.website.com.key -out my.website.com.csr
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [US]:GB
State or Province Name (full name) [Some-State]: Gloucestershire
Locality Name (eg, city) []:Chipping Sodbury
Organization Name (eg, company) [Unconfigured OpenSSL Installation]:The Donkey Sanctuary
Organizational Unit Name (eg, section) []:WebSupport
Common Name (eg, YOUR name) []:my.website.com
Email Address []:
Please enter the following 'extra' attributes
to be sent with your certificate request
A challenge password []:
An optional company name []:
OpenSSL> quit
3. Send the file my.website.com.csr to your SSL Certificate Authority and wait for your new certificate.
Given that most of us will support many sites, and the data to be input for each CSR will change only very slightly (often just the website name), I knew there had to be a better way, and there is. There is a -batch parameter that, when combined with the -config option, allows you to provide a config file as input for the command and avoid having to type the parameters each time. The command will look something like:
/usr/sfw/bin/openssl req -batch -new -key my.website.com.key -out my.website.com.csr -config config-file.txt
The config file format isn’t too hard to get right either; here is an example:
RANDFILE = $ENV::HOME/.rnd
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = GB
ST = Test State or Province
L = Test Locality
O = Organization Name
OU = Organizational Unit Name
CN = Common Name
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
I hope you find this useful.
Paul.
While I was working on a perl script for automating ZFS snapshots I happened to discover the automatic ZFS snapshot service for opensolaris by Tim Foster.
http://wikis.sun.com/display/OpenSolarisInfo/How+to+Manage+the+Automatic+ZFS+Snapshot+Service
This is, of course, exactly what I need – the problem is that I need it to run on Solaris 10, for which it is not currently available. However, this thread on Tim’s blog (especially the comments) suggests that it is possible to get the service working, without the time slider element, on Solaris 10:
http://blogs.sun.com/timf/entry/zfs_automatic_snapshots_0_12
So, yesterday I attempted to get this installed: compile it into an SVR4 package with pkgmk and pkgtrans, and install it on Solaris 10. It was remarkably straightforward and early testing shows it to work very well.
First, get the ‘bits’ and make the pkg as described on Tim’s blog, using hg to clone and make to create the pkg tree.
$ hg clone ssh://anon@hg.opensolaris.org/hg/jds/zfs-snapshot
Before actually creating the package, there are three changes to make, as detailed by Eli Kleinamm, again in the blog comments: http://blogs.sun.com/timf/entry/zfs_automatic_snapshots_0_12
These changes are made in ~/auto-snapshot/zfs-snapshot/src/lib/svc/method
# vi zfs-auto-snapshot
Make the following changes:
1) Change the shell the script runs under to /usr/dt/bin/dtksh.
2) On lines 511 and 517, remove the -o com.sun:auto-snapshot-desc="$EVENT" option.
3) On line 970, remove the space/tab between $SWAPVOLS and $(echo, i.e. SWAPVOLS="$SWAPVOLS$(echo $swap | sed -e 's#/dev/zvol/dsk/##')"
Once done, run make – it should create a pkg-format file tree for you in the current working directory. Read the Makefile, it’s not long.
Next, create the pkg datastream for portability
$ pkgtrans -s . /var/spool/pkg/SUNWzfs-auto-snapshot.pkg SUNWzfs-auto-snapshot
Transferring <SUNWzfs-auto-snapshot> package instance
This will now simply pkgadd onto a Solaris 10 system. I can’t help but think this is particularly awesome, but as a colleague pointed out, this now really has to go through some very extensive testing before any rollout. I still think it’s a lot better than my dangerous Perl scripting attempts.
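Installing the datastream on a target Solaris 10 host is then just the following (a sketch, assuming the .pkg file created above has been copied to the same path on the target):
# pkgadd -d /var/spool/pkg/SUNWzfs-auto-snapshot.pkg SUNWzfs-auto-snapshot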
This will now (if I can get agreement internally) be added as a package to the standard build for all Solaris 10 hosts using ZFS within our control.
The service provides 5 SMF services for managing automatic snapshots. These are:
svc:/system/filesystem/zfs/auto-snapshot:monthly
svc:/system/filesystem/zfs/auto-snapshot:weekly
svc:/system/filesystem/zfs/auto-snapshot:daily
svc:/system/filesystem/zfs/auto-snapshot:hourly
svc:/system/filesystem/zfs/auto-snapshot:frequent
Enable these to have snapshots taken at the regularity suggested by the name. Each has properties to tune, the most notable being how many snapshots to retain:
-bash-3.00$ svccfg -s auto-snapshot:daily listprop zfs/keep
zfs/keep astring 31
-bash-3.00$ svccfg -s auto-snapshot:frequent listprop zfs/keep
zfs/keep astring 4
-bash-3.00$ svccfg -s auto-snapshot:frequent listprop zfs/period
zfs/period astring 15
The defaults for daily and frequent are one month’s worth of dailies and 4 frequents. Frequent is defined initially as once every 15 minutes, as shown by the zfs/period property.
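To switch a given schedule on, enable the corresponding SMF instance; a minimal example using the frequent schedule listed above:
# svcadm enable svc:/system/filesystem/zfs/auto-snapshot:frequent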
Whether a ZFS dataset is snapshotted or not is controlled by a user-level ZFS property, com.sun:auto-snapshot, which can be true or false for a dataset, plus com.sun:auto-snapshot:frequent, com.sun:auto-snapshot:daily, etc. properties for each periodicity as required. These are inherited, so it is probably a good idea to set com.sun:auto-snapshot to false at the root of a pool and then change the required sub-datasets to true as needed. Perhaps an example would help:
-bash-3.00$ pfexec zfs set com.sun:auto-snapshot=true datapool/zfstestz1_ds
-bash-3.00$
-bash-3.00$ pfexec zfs set com.sun:auto-snapshot=false rpool
-bash-3.00$ pfexec zfs set com.sun:auto-snapshot=true rpool/export
-bash-3.00$ pfexec zfs set com.sun:auto-snapshot:frequent=true rpool/export
-bash-3.00$ pfexec zfs set com.sun:auto-snapshot:frequent=true datapool/zfstestz1_ds
-bash-3.00$
<getting bored with pfexec for each cmd>
-bash-3.00$ pfexec bash
bash-3.00# zfs set com.sun:auto-snapshot=false rpool/dump
bash-3.00# zfs set com.sun:auto-snapshot=false rpool/swap
bash-3.00# cat /etc/release
Solaris 10 10/09 s10x_u8wos_08a X86
Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
Use is subject to license terms.
Assembled 16 September 2009
bash-3.00#
bash-3.00# svcs -a |grep snapshot
disabled 17:59:54 svc:/system/filesystem/zfs/auto-snapshot:monthly
disabled 17:59:54 svc:/system/filesystem/zfs/auto-snapshot:weekly
disabled 17:59:54 svc:/system/filesystem/zfs/auto-snapshot:daily
disabled 17:59:54 svc:/system/filesystem/zfs/auto-snapshot:hourly
online 18:00:44 svc:/system/filesystem/zfs/auto-snapshot:event
online 18:09:18 svc:/system/filesystem/zfs/auto-snapshot:frequent
bash-3.00#
bash-3.00#
bash-3.00# zfs list -t snapshot | grep frequent
datapool@zfs-auto-snap_frequent-2010-09-23-1809 0 - 28.0K -
datapool/zfstestz1_ds@zfs-auto-snap_frequent-2010-09-23-1809 0 - 28.0K -
rpool/export@zfs-auto-snap_frequent-2010-09-23-1809 0 - 23K -
rpool/export/home@zfs-auto-snap_frequent-2010-09-23-1809 0 - 954K -
bash-3.00#
bash-3.00# pkginfo SUNWzfs-auto-snapshot
application SUNWzfs-auto-snapshot ZFS Automatic Snapshot Service
bash-3.00#
bash-3.00# date
Thursday, 23 September 2010 18:15:48 BST
bash-3.00# zfs list -t snapshot | grep frequent
datapool@zfs-auto-snap_frequent-2010-09-23-1809 0 - 28.0K -
datapool@zfs-auto-snap_frequent-2010-09-23-1815 0 - 28.0K -
datapool/zfstestz1_ds@zfs-auto-snap_frequent-2010-09-23-1809 0 - 28.0K -
datapool/zfstestz1_ds@zfs-auto-snap_frequent-2010-09-23-1815 0 - 28.0K -
rpool/export@zfs-auto-snap_frequent-2010-09-23-1809 0 - 23K -
rpool/export@zfs-auto-snap_frequent-2010-09-23-1815 0 - 23K -
rpool/export/home@zfs-auto-snap_frequent-2010-09-23-1809 0 - 954K -
rpool/export/home@zfs-auto-snap_frequent-2010-09-23-1815 0 - 954K -
I think this should prove to be very useful for us. As usual, I would be very interested in hearing your comments.
Paul.
This blog entry wasn’t intended to be published. It was simply a place for me to collect my thoughts on the state of one of our servers and its remaining capacity for extra zones.
The server is an X4170 with 2 quad-core AMD Opterons and 64 GB of RAM. It is currently running 11 zones, each running an instance of apache and an instance of tomcat. The load currently looks reasonably light, certainly from a CPU viewpoint:
ZONEID NPROC SWAP RSS MEMORY TIME CPU ZONE
0 61 1314M 1308M 2.0% 804:24:29 2.9% global
13 58 1381M 936M 1.4% 35:16:01 0.5% zoneA
16 45 1238M 496M 0.8% 11:20:16 0.2% zoneB
29 55 1378M 576M 0.9% 4:30:13 0.1% zoneC
9 47 1286M 496M 0.8% 21:08:49 0.1% zoneD
2 60 1290M 464M 0.7% 7:35:19 0.0% zoneE
7 51 1298M 460M 0.7% 6:50:11 0.0% zoneF
24 57 835M 896M 1.4% 17:14:18 0.0% zoneG
27 45 138M 191M 0.3% 7:52:47 0.0% zoneH
4 47 345M 408M 0.6% 11:26:22 0.0% zoneI
Total: 573 processes, 2649 lwps, load averages: 0.38, 0.29, 0.27
Note that each zone is consuming several hundred megabytes of RAM (see the RSS column); this consumption will almost certainly be due to the java heap settings / sizings of the various tomcat instances.
Let’s take a look at one of the zones:
> zlogin zoneA ps -futomcat
UID PID PPID C STIME TTY TIME CMD
tomcat 29407 29406 0 Jun 07 ? 1294:57 /usr/java/bin/java -Djava.util.logging.config.file=/usr/local/tomcat/conf/loggi
tomcat 29406 20781 0 Jun 07 ? 0:47 /usr/local/sbin/cronolog /usr/local/tomcat/logs/catalina.out.%Y-%m-%d
How do we interrogate this process to find the resident set size? Well, there are a few options: hunt down the config entries that launch this tomcat server (usually set as a var CATALINA_OPTS), take the easy route and use prstat, or use the ‘process tools’ such as pmap. Ever used pmap? pmap -x will show more detail about the address space mapping of a process than you ever wanted to know; shown below, we can see that the tomcat process in zoneA is using >700 MB of RAM.
bash-3.00# pmap -x 29407
29407: /usr/java/bin/java -Djava.util.logging.config.file=/usr/local/tomcat/c
Address Kbytes RSS Anon Locked Mode Mapped File
08008000 12 - - - ----- [ anon ]
0803D000 44 44 44 - rwx-- [ stack ]
<... truncated ...>
FEFF0000 4 4 4 - rwx-- [ anon ]
FEFFB000 8 8 8 - rwx-- ld.so.1
FEFFD000 4 4 4 - rwx-- ld.so.1
-------- ------- ------- ------- -------
total Kb 1349932 780360 744800 -
Thought 1: there is only 6 GB of free memory (defined as pages actually on the freelist). Where is it all? I would expect that the ZFS ARC cache will be consuming the lion’s share, and this blog entry will show you how to check. As applications use more RAM, ZFS should behave and relinquish the previously “unused” RAM; but remember that reducing ZFS’s available allocation (by default 7/8ths of RAM) will in theory impact read performance, as we will likely see more cache misses. ZFS should be pretty good at reducing the ARC size as the system requests more and more RAM. Let’s check the numbers, both the RAM used for the ARC and the cache efficiency statistics.
Here is the mdb memstat that shows the memory usage detail:
bash-3.00# echo "::memstat" | mdb -k
Page Summary Pages MB %Tot
------------ ---------------- ---------------- ----
Kernel 13015620 50842 78%
Anon 1416662 5533 8%
Exec and libs 42400 165 0%
Page cache 695383 2716 4%
Free (cachelist) 27757 108 0%
Free (freelist) 1577146 6160 9%
Total 16774968 65527
Physical 16327309 63778
bash-3.00#
Rather a lot is used by the kernel (indicative of ZFS ARC usage) rather than by anon, exec and libs. Note also the currently rather small freelist as a percentage of total physical RAM; as mentioned, I suspect ZFS will be consuming a lot (perhaps 40 GB plus?) of the otherwise unused RAM.
The free memory values and ZFS ARC stats can conveniently be confirmed with a quick look at the kstats via arc_summary, an incredibly useful tool written by Ben Rockwood (thanks Ben) which saves us all an immense amount of time remembering how, where and which kstats to interrogate for memory and ZFS statistics.
Get it here: http://cuddletech.com/arc_summary/
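Running it (a sketch, assuming the script has been downloaded as arc_summary.pl into the current directory and made executable) produces, among other things, the sections below:
# ./arc_summary.pl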
System Memory:
Physical RAM: 65527 MB
Free Memory : 4369 MB
LotsFree: 996 MB
The arcsize, as suspected, is large at over 45 GB:
ARC Size:
Current Size: 46054 MB (arcsize)
The ARC cache is, however, doing a good job with that 45 GB; a hit ratio of 96% isn’t bad.
ARC Efficency:
Cache Access Total: 4384785832
Cache Hit Ratio: 96% 4242292557 [Defined State for buffer]
Cache Miss Ratio: 3% 142493275 [Undefined State for Buffer]
REAL Hit Ratio: 92% 4060875546 [MRU/MFU Hits Only]
Notice also that the ARC is doing a fairly reasonable job of correctly predicting our prefetch requirements; 36% isn’t too bad compared to a directly demanded cache hit rate of 72%.
Data Demand Efficiency: 72%
Data Prefetch Efficiency: 36%
OK, so in theory if we keep adding zones we have to be very careful about the available ram allocations given that these zones are running some reasonably memory-hungry (tomcat) java instances. As more and more memory is allocated to apache/tomcat and java within new zones, less and less ram will be available for the zfs arc. This will need some careful monitoring.
Currently the cpu usage on this server is also fairly light, but I have no view of the future levels of usage or traffic intended to run through these services. I’ll park this one for now.
Thought 2, disk space. Currently the zones have a zfs dataset allocated in the root pool for each zone root:
rpool/ROOT/s10x10-08/zones/zoneB 6.23G 5.77G 6.23G /zones/zoneB
Each of these has a 12 GB quota; the output above (from zfs list) is typical of the usage for the zones, around 50% of the 12 GB quota. There are 11 zones, each with this 12 GB quota, giving a potential total usage of 132 GB. The entire root pool is only 136 GB and already has just 32.5 GB available, as shown by zpool list:
root > zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
datapool 816G 31.2G 785G 3% ONLINE -
rpool 136G 104G 32.5G 76% ONLINE -
It seems, then, that rather than concerning myself with memory or CPU usage on this server, unless we find somewhere else for the zone root filesystems we are likely to run out of local storage space well before we stretch the capabilities of this server.
Coming soon: a blog entry on increasing the size of your root pool with some mirror juggling. I’ve done this successfully on a VM; I just need a test server to perfect the process.
Comments welcome.
Paul
I recently needed to synchronise a web server I look after to a remote backup. Here are my notes on the installation of rsync, the prerequisite packages and a brief note on the usage I implemented for the rsync command.
You can obtain rsync from www.sunfreeware.com, along with the pre-requisites to run it. It is also available from www.blastwave.org if you prefer.
The packages required before you install rsync are:
– libgcc-3.4.6-sol10-sparc-local.gz
– popt-1.14-sol10-sparc-local.gz
– libiconv-1.13.1-sol10-sparc-local.gz
– libintl-3.4.0-sol10-sparc-local.gz
Download each from www.sunfreeware.com, unzip and pkgadd. Below is an example for the gcc libraries, output omitted. It is worth noting that you will have these libraries if you have gcc installed, which this particular server didn’t.
# /usr/sfw/bin/wget ftp://ftp.sunfreeware.com/pub/freeware/sparc/10/libgcc-3.4.6-sol10-sparc-local.gz
# gunzip libgcc-3.4.6-sol10-sparc-local.gz
# pkgadd -G -d libgcc-3.4.6-sol10-sparc-local
The rsync syntax I used to actually perform the synchronisation was
/usr/local/bin/rsync -avz --delete -e ssh <username>@<host>:/source-dir/source-subdir/ /local-destination-dir/local-dest-subdir
This is explained thus:
-a : archive mode : this ensures that all symbolic links, devices, attributes, permissions and ownerships are preserved in the transfer
-v : verbose mode : this lists the file sync list as well as statistics about the transfer time and data volume
-z : compression : this trades transfer data volume for CPU cycles by compressing the data before sending it over the wire
--delete : this option will remove files in the destination directory if they no longer exist in the source tree
-e ssh : this is the transport option; advantages of using ssh are obviously encryption and secure no-password logins with RSA keys
Another point worthy of note is the trailing slash on the source directory path. This is significant because it tells rsync to copy the contents of the directory rather than the directory itself; omit this and you will likely end up with the source directory being (re)created inside the destination/target directory. Just be aware, so you can choose the behaviour you need.
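To illustrate with a pair of hypothetical paths: the first command below copies the contents of /data/src into /backup/dst, while the second recreates the src directory itself inside /backup/dst:
rsync -avz /data/src/ /backup/dst
rsync -avz /data/src /backup/dst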
The output will look something like below (which is just a test run on an apache logs directory to show the operation).
syncing /opt/coolstack/apache2/logs/ /backups/webserv/opt/coolstack/apache2/logs
receiving incremental file list
access_log
error_log
ssl_request_log
sent 244123 bytes received 72351 bytes 3313.86 bytes/sec
total size is 589183260 speedup is 1861.71
As usual, this is mostly for my benefit, but I hope it helps you too.
The installation of jvmstat on Solaris really is very straightforward. These notes are more a quick reminder for myself rather than anything else, but perhaps it will be of benefit to others to find the information in one place.
First step; I downloaded jvmstat from http://java.sun.com/performance/jvmstat/
The installation is very simple, just unzip the file into /usr/local :
# cd -
/usr/local
# cp /var/spool/pkg/jvmstat-2.0_b11.zip .
# unzip jvmstat-2.0_b11.zip
Archive: jvmstat-2.0_b11.zip
creating: jvmstat/
creating: jvmstat/policies/
< ... truncated ... >
inflating: jvmstat/docs/install/windows.html
inflating: jvmstat/docs/install/solaris.html
I chose to install jvmstat 2.0, even though 3.0 is available, because 3.0 requires Java 1.5.0. At the time of install, I didn’t realise that 1.5.0 was available in /usr/local/jdk1.5.0_16/. I may upgrade later.
The environment variables worthy of note that you need to set are:
# echo $PATH
/usr/sbin:/usr/bin:/usr/local/jvmstat/bin:/usr/java/bin
# echo $JAVA_HOME
/usr/java
Once this is all done, the Java VMs running on the system can be found with jvmps (jps in version 3.0) and a quick command line test can be run with jvmstat (jstat in version 3.0):
# jvmps
6803 jvmps.jar
1928 org.apache.catalina.startup.Bootstrap
# jvmstat -gcutil 1928 1000 3
S0 S1 E O P YGC YGCT FGC FGCT GCT
0.00 47.48 83.34 6.96 99.40 85 0.881 4 2.204 3.085
0.00 47.48 83.34 6.96 99.40 85 0.881 4 2.204 3.085
0.00 47.48 83.34 6.96 99.40 85 0.881 4 2.204 3.085
The GUI can be run with ‘visualgc <pid>’, which I tested and it works fine; you will need an X server to display the output on, though.
Today I needed to install visualgc for tomcat on Solaris. Now, GC analysis and configuration of the tools required to do so is something I’ve not done in a number of years. So, as a small exercise designed to re-familiarise myself with the foibles of tomcat, garbage collection, java command line configuration options and so on, I ventured on a brief excursion into the world of tomcat, java and jvmstat on Mac OS X. It is Unix, after all, so what could be so hard – right?
First task, download and install tomcat. This really couldn’t be simpler, a zipped tar file to install pretty much wherever you choose. A typical, popular location is /usr/local. I have chosen /usr/local/tomcat.
I downloaded apache-tomcat-5.5.30.tar.gz (there are later versions available depending on your requirements, but this is the version I will be monitoring at work) from http://tomcat.apache.org/
Copy the file into /usr/local and extract
$ gnutar -xzvf apache-tomcat-5.5.30.tar.gz
Change the ownership to your local user and group:
$ chown -R egg:staff apache-tomcat-5.5.30
In order to start and stop tomcat with the correct details for CATALINA_HOME etc, you’ll need tomcat stop and start scripts. I used the following, fairly standard-looking scripts, which I placed into $HOME/bin; please note that the variables will of course vary depending on your configuration. Worthy of note here is that I *did not* eventually use JAVA_HOME set to /usr – more on that later.
start_tomcat
#!/bin/sh
export CATALINA_HOME=/usr/local/apache-tomcat-5.5.30
export JAVA_HOME=/usr
$CATALINA_HOME/bin/startup.sh
stop_tomcat
#!/bin/sh
export CATALINA_HOME=/usr/local/apache-tomcat-5.5.30
export JAVA_HOME=/usr
$CATALINA_HOME/bin/shutdown.sh
As mentioned, a good place for these are in your local home directory, but this may vary depending on your environment. I put them in ~/bin/start_tomcat and ~/bin/stop_tomcat. Don’t forget to make them executable.
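For example:
$ chmod +x ~/bin/start_tomcat ~/bin/stop_tomcat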
For my purposes, I created a tomcat-env.sh script that can be used to some CATALINA_OPTS, I was only really interested in changing the java min and max heap sizes for now:
$ cat tomcat-env.sh
CATALINA_OPTS=$CATALINA_OPTS" -Xms512m -Xmx512m"
export CATALINA_OPTS
You should now be able to run ~/bin/start_tomcat and connect to http://localhost:8080 to be greeted with your tomcat installation home page.
Next, download jvmstart from http://java.sun.com/performance/jvmstat/
I chose to put this in /usr/local/jvmstat, again, this is local site install dependent.
$ cp /Users/egg/Downloads/jvmstat-3_0.zip /usr/local
$ unzip jvmstat-3_0.zip
I created a ~/jvmstat-env.sh to set required variables for jvmstat:
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home
JVMSTAT_HOME=/usr/local/jvmstat
PATH=$JVMSTAT_HOME/bin:$JAVA_HOME/bin:$PATH
export PATH
export JVMSTAT_JAVA_HOME=$JAVA_HOME
This is where I should say something about the madness of JDKs on MacOS. Under most OSes, the Java JDK has a ~/lib/tools.jar that the jvmstat tools rely on. In fact, jvmstat (more specifically visualgc) looks for this file to determine whether you have pointed your java home at a JRE or a JDK. If visualgc thinks you don’t have a complete JDK you get the following rather helpful error:
$ /usr/local/jvmstat/bin/visualgc 8650
The java.exe found at:
/System/Library/Frameworks/JavaVM.framework/Versions/1.5.0/Home
is not in a JDK directory.
Please set and export your JVMSTAT_JAVA_HOME environment variable to refer to a directory containing the Sun J2SE 1.5.0 JDK (not a JRE)
Yes, this is Mac OS, and yes it does bizarrely refer to ‘java.exe’. A little investigation shows that visualgc is indeed looking for the tools.jar on start:
# check that we are dealing with a JDK, not a JRE or installed JDK
if [ ! -f "${JVMSTAT_JAVA_HOME}/lib/tools.jar" ] ; then
jreerror
fi
Under MacOS, however, the equivalent to tools.jar is classes.jar. I am yet to establish why this should be different. The Java home isn’t to be found in /usr either; that directory just contains a /usr/bin/java symlink to the latest runtimes for Java applications on your Mac. Instead I found I had to set a java home to:
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home
Or, if you are lucky, the following may work :
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home
However, classes.jar only seemed to exist in /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Classes/classes.jar
So, with JAVA_HOME set to /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home
I created a symbolic link from /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home/lib/tools.jar to /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Classes/classes.jar, and now visualgc would finally start without the error.
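A sketch of the command to create such a link (paths as described above; sudo will likely be needed to write under /System):
$ sudo ln -s /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Classes/classes.jar /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home/lib/tools.jar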
Once you have tomcat installed, the jvmstat installed, the jvmstat-env.sh set correctly and the java home set with a valid tools.jar / classes.jar you should be able to start tomcat, test jvmstat using gcutil at the command line and finally launch visualgc with some success:
$ . tomcat-env.sh
$ . jvmstat-env.sh
$ bin/start_tomcat
Using CATALINA_BASE: /usr/local/apache-tomcat-5.5.30
Using CATALINA_HOME: /usr/local/apache-tomcat-5.5.30
Using CATALINA_TMPDIR: /usr/local/apache-tomcat-5.5.30/temp
Using JRE_HOME: /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home
Using CLASSPATH: /usr/local/apache-tomcat-5.5.30/bin/bootstrap.jar
Let’s check that tomcat is running:
$ ps -ef | grep java
501 86749 1 0 0:00.24 ttys002 0:03.39 /System/Library/Frameworks/JavaVM.framework/Versions/1.6.0/Home/bin/java -Djava.util.logging.config.file=/usr/local/apache-tomcat-5.5.30/conf/logging.properties -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Xms512m -Xmx512m -Xms512m -Xmx512m -Xms512m -Xmx512m -Djava.endorsed.dirs=/usr/local/apache-tomcat-5.5.30/common/endorsed -classpath /usr/local/apache-tomcat-5.5.30/bin/bootstrap.jar -Dcatalina.base=/usr/local/apache-tomcat-5.5.30 -Dcatalina.home=/usr/local/apache-tomcat-5.5.30 -Djava.io.tmpdir=/usr/local/apache-tomcat-5.5.30/temp org.apache.catalina.startup.Bootstrap start
Marvellous.
Now find the process id of interest, the easy way:
$ jps
86749 Bootstrap
86754 Jps
We are interested in ‘Bootstrap’ (org.apache.catalina.startup.Bootstrap)
Let’s test the jvmstat with gcutil on the command line, using the process number (86749) gleaned from jps:
$ jstat -gcutil 86749 1000 3
  S0     S1     E      O      P     YGC    YGCT    FGC    FGCT     GCT
  0.00  97.05  66.06   0.00  67.34      1   0.020      0   0.000   0.020
  0.00  97.05  66.06   0.00  67.34      1   0.020      0   0.000   0.020
  0.00  97.05  66.06   0.00  67.34      1   0.020      0   0.000   0.020
Looks good, and as long as we have the java home, and the link from tools.jar to classes.jar as described above, we should be able to launch visualgc:
$ /usr/local/jvmstat/bin/visualgc 86749
Q. How do I format the date for display on screen or for use in my scripts, as per my requirements?
A. You need to use the standard date command to format the date or time for output, or to use it in a shell script.
Syntax to specify the format: date +FORMAT
Task: Display date in mm-dd-yy format
Type the command as follows:
$ date +"%m-%d-%y"
Output: 02-27-07
Turn on 4 digit year display:
$ date +"%m-%d-%Y"
Just display date as mm/dd/yy format:
$ date +"%D"
Task: Display time only
Type the command as follows:
$ date +"%T"
Output: 19:55:04
Display locale's 12-hour clock time:
$ date +"%r"
Output: 07:56:05 PM
Display time in HH:MM format:
$ date +"%H-%M"
How do I save the time/date format to a variable?
Simply type the command as follows at a shell prompt:
$ NOW=$(date +"%m-%d-%Y")
To display the variable, use the echo / printf command:
$ echo $NOW
Sample shell script:
#!/bin/bash
NOW=$(date +"%m-%d-%Y")
FILE="backup.$NOW.tar.gz"
# rest of script
Complete list of FORMAT control characters supported by the date command
FORMAT controls the output. It can be a combination of any of the following:
%%    a literal %
%a    locale's abbreviated weekday name (e.g., Sun)
%A    locale's full weekday name (e.g., Sunday)
%b    locale's abbreviated month name (e.g., Jan)
%B    locale's full month name (e.g., January)
%c    locale's date and time (e.g., Thu Mar 3 23:05:25 2005)
%C    century; like %Y, except omit last two digits (e.g., 21)
%d    day of month (e.g, 01)
%D    date; same as %m/%d/%y
%e    day of month, space padded; same as %_d
%F    full date; same as %Y-%m-%d
%g    last two digits of year of ISO week number (see %G)
%G    year of ISO week number (see %V); normally useful only with %V
%h    same as %b
%H    hour (00..23)
%I    hour (01..12)
%j    day of year (001..366)
%k    hour ( 0..23)
%l    hour ( 1..12)
%m    month (01..12)
%M    minute (00..59)
%n    a newline
%N    nanoseconds (000000000..999999999)
%p    locale's equivalent of either AM or PM; blank if not known
%P    like %p, but lower case
%r    locale's 12-hour clock time (e.g., 11:11:04 PM)
%R    24-hour hour and minute; same as %H:%M
%s    seconds since 1970-01-01 00:00:00 UTC
%S    second (00..60)
%t    a tab
%T    time; same as %H:%M:%S
%u    day of week (1..7); 1 is Monday
%U    week number of year, with Sunday as first day of week (00..53)
%V    ISO week number, with Monday as first day of week (01..53)
%w    day of week (0..6); 0 is Sunday
%W    week number of year, with Monday as first day of week (00..53)
%x    locale's date representation (e.g., 12/31/99)
%X    locale's time representation (e.g., 23:13:48)
%y    last two digits of year (00..99)
%Y    year
%z    +hhmm numeric timezone (e.g., -0400)
%:z   +hh:mm numeric timezone (e.g., -04:00)
%::z  +hh:mm:ss numeric time zone (e.g., -04:00:00)
%:::z numeric time zone with : to necessary precision (e.g., -04, +05:30)
%Z