ShellTricks

=Searching=
 * Search for a "this OR that" search : grep -E "this|that"   (plain grep treats | literally; use grep -E or egrep)
 * Search for a "this AND that" search : awk '/this/ && /that/'

=Cron to remove old/console logins= 0 6 * * * who -u | egrep "\.0|old" | awk '{print $7}' | xargs kill -9

=Add Thousands comma= Turn 123456789 into 123,456,789:
 * 1) echo 123456789  | /usr/bin/sed  -e :a -e 's/\(.*[0-9]\)\([0-9]\{3\}\)/\1,\2/;ta'

=Password Last Changed= A script which shows when user passwords were last changed, for OS's where shadow stores days past the Epoch.
 * 1) #!/usr/bin/perl
 * 2) Output date format is YYYY-MM-DD

open( S, "/etc/shadow" ); while( <S> ) { ($user,$lastchg) = (split /:/)[0,2]; @t = localtime( $lastchg*86400 ); printf "User %-8s last changed password %0.4d-%0.2d-%0.2d (%5d)\n", $user, $t[5]+1900, $t[4]+1, $t[3], $lastchg; } close( S ); exit 0; Here's a version that shows accounts with valid passwords that are more than 90 days old (or today-$1 if passed):
 * 1) #!/usr/bin/perl
 * 2) Output date format is YYYY-MM-DD

if (defined $ARGV[0]) {$expired = int(time / 86400) - int($ARGV[0])} else {$expired = int(time / 86400) - 90};
 * 1) Either it is older than "today-$1" or 90 days

open( S, "/etc/shadow" ); while( <S> ) { ($user,$lastchg) = (split /:/)[0,2]; $pwd = (split /:/)[1];

# unless ($pwd =~ m/\*|\!\!|\*LK\*|NP|\*LK\*NP/) { @t = localtime( $lastchg*86400 ); if ($lastchg < $expired) { printf "User %-10s PW last changed %0.4d-%0.2d-%0.2d (%5d)\n", $user, $t[5]+1900, $t[4]+1, $t[3], $lastchg; }  } } close( S ); exit 0;
 * 1) Check if password isn't "*","!!","*LK*","NP", or "*LK*NP" (i.e. is valid)

=Date Tricks=  Print yesterday, today, and tomorrow : # TZ=aaa24 date Tue Mar 3 14:29:52 aaa 2009 # date Wed Mar 4 08:29:53 CST 2009 # TZ=aaa-24 date Thu Mar 5 14:29:55 aaa 2009  Formatted : # TZ=aaa24 date +%Y%m%d 20090303
 * Convert Epoch day (i.e. /etc/shadow) to a date
 * Date minus one day, and how it works. Note "aaa" represents a non-existent timezone and can be anything that doesn't match a timezone abbreviation. the "24" part says 24 hours before current time.

=While Read= Here's a way to parse lines using a "while read" loop. Given the contents of "file.txt" of 1 2 3 4 5 6 7 8 9 To print each line: cat file.txt | while read x ; do echo $x ; done 1 2 3 4 5 6 7 8 9 To parse each line into variables: cat file.txt | while read x y z ; do echo $x $y $z ; done 1 2 3 4 5 6 7 8 9 Note that the remainder of the line goes in the last variable if the number of variables doesn't match the number of elements on the line. cat file.txt | while read x y ; do echo $x $y ; done 1 2 3 4 5 6 7 8 9

=Add column=

In this example, adds the first column : awk 'BEGIN{total=0}{total +=$1}END{print total}' To total the fifth column and add commas to the total (using the sed trick above) : awk 'BEGIN{total=0}{total +=$5}END{print total}' \ | /usr/bin/sed -e :a -e 's/\(.*[0-9]\)\([0-9]\{3\}\)/\1,\2/;ta'

=Sort IP's from DNS hosts file=
 * grep '192\.168\.1\.' .hosts | awk '{printf "%-15s\t %s\n",$NF,$1}' | sort -t . -k 4 -un

=Replace Spaces in Filenames= ls | grep " " | while read file ; do target=`echo "$file"|tr -s ' '|tr ' ' '-'` ; mv "$file" "$target" ; done
 * 1) #!/bin/ksh

=Turn one column into two= To change a single column (1 2 3 4 ... one value per line) into two columns (1 2 on the first line, 3 4 on the next, and so on) Do: cat | awk '{if (NR % 2 == 1) {printf"%s ",$0} else {print $0}}'

=Search / Replace a file= perl -pi -e 's/what to search/replace with this/' file*

=Word Frequency Analysis= To count frequency of words in a text file (output format is "<count> <word>", most frequent first):
 * 1) #!/bin/bash
 * 2) wf.sh: Crude word frequency analysis on a text file.
 * 3) This is a more efficient version of the "wf2.sh" script.

ARGS=1 E_BADARGS=85 E_NOFILE=86
 * 1) Check for input file on command-line.

if [ $# -ne "$ARGS" ]  # Correct number of arguments passed to script?
then
  echo "Usage: `basename $0` filename"
  exit $E_BADARGS
fi

if [ ! -f "$1" ]      # Check if file exists.
then
  echo "File \"$1\" does not exist."
  exit $E_NOFILE
fi

sed -e 's/\.//g' -e 's/\,//g' -e 's/ /\
/g' "$1" | tr 'A-Z' 'a-z' | sort | uniq -c | sort -nr
 * 1) main
 * 2)                            Frequency of occurrence


 * 1)  Filter out periods and commas, and
 * 2) + change space between words to linefeed,
 * 3) + then shift characters to lowercase, and
 * 4) + finally prefix occurrence count and sort numerically.


 * 1)  Arun Giridhar suggests modifying the above to:
 * 2)  . . . | sort | uniq -c | sort +1 [-f] | sort +0 -nr
 * 3)  This adds a secondary sort key, so instances of
 * 4) + equal occurrence are sorted alphabetically.
 * 5)  As he explains it:
 * 6)  "This is effectively a radix sort, first on the
 * 7) + least significant column
 * 8) + (word or string, optionally case-insensitive)
 * 9) + and last on the most significant column (frequency)."
 * 10)  As Frank Wang explains, the above is equivalent to
 * 11) +      . . . | sort | uniq -c | sort +0 -nr
 * 12) + and the following also works:
 * 13) +      . . . | sort | uniq -c | sort -k1nr -k2

exit 0