Linux 常用运维命令

2024-11-26 22:50:45
丁国栋
原创 24
摘要:本文记录了笔者从2015年11月以来收集整理的Linux 常用运维命令,原来整理在Evernote中,现在放到网页里方便快速查找。

以下命令是从2015年11月以来收集整理的Linux 常用运维命令。排名不分先后。暂时没有格式化,凑合着可以看。

自从有了GPT之后这些知识仿佛没有必要记录到笔记或者脑子里了,其实不然,笔记的好处是随时翻阅并强化记忆,而过分依赖 AI 可能会产生一些不好的影响。并非不推荐使用 AI 工具,但日常整理记录这些也很重要。


# 查看 Git 本地仓库的分支对应的上游分支名称
git rev-parse --abbrev-ref @{upstream}

统计443端口客户端连接数量信息
sudo netstat -anot | grep ':443' | awk '{print $5}' | cut -d':' -f1 | sort | uniq -c|sort -nk 1

统计Apache2进程占用的内存大小
ps aux | grep '[a]pache2' | awk '{sum += $6} END {print sum/1024 " MB"}'  # [a]pache2 避免统计到 grep 进程自身

如何获取一个Linux sock文件目前正在提供服务的数量,例如/run/php/php7.0-fpm.sock
ss -lp | grep php7.0-fpm.sock

# 查找俄文字符
grep -P "[А-Яа-яЁё]" filename.txt


# Debian Ubuntu静默执行apt
DEBIAN_FRONTEND=noninteractive apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
DEBIAN_FRONTEND=noninteractive apt install -y fio stress iperf3 libusb-1.0-0-dev libopencv-dev

find -maxdepth 1 -type d ! -wholename '.' |xargs -i rm -rf {}

# at 定时任务
systemctl status atd
echo "rsync -av /home/tux me@myserver:/home/tux/" | at 3:30 AM tomorrow
echo "/opt/batch.sh ~/Pictures" | at 3:30 AM 08/01/2022
echo "echo hello" | at now + 3 days
atq # 打印at的任务队列
at -c 1 # 查看要执行的内容

7z x zentao.7z

# git清理工作区
git clean -df

# grep 遵循软链接,-R 参数
sudo grep pms2081 /etc/nginx/ -Rni


高级的日志查看工具(A log file viewer for the terminal Merge, tail, search, filter, and query log files with ease. No server. No setup. Still featureful.)
lnav
CTRL+W 换行显示

多线程下载工具aria2
aria2c -x16 -s16 --out=filename URL

ssh压缩传输文件
tar -c --use-compress-program zstd <files> | pv --timer --rate | ssh <username>@<target_host> "cd <target_dir> && tar -x --zstd"

合并标准输出和标准错误输出到文件
nginx -V &> /dev/null

Docker容器查看容器占用内存等资源的情况
docker stats --no-stream mysql


获取GitHub某个仓库最新的release
curl -s https://api.github.com/repos/snail007/goproxy/releases/latest | grep "tag_name" | grep -o -E 'v[0-9]+(\.[0-9]+)*'

# 支持http协议的时间同步
sudo htpdate -a time.windows.com
nc -zu time.windows.com 123


grep匹配版本号
echo '"tag_name": "v14.1",' | grep -o -E 'v[0-9]+(\.[0-9]+)*'


# 截断文件
sudo truncate -s 0 /path/to/file
> /path/to/file
echo | sudo tee /path/to/file


MySQL查看存储过程
SELECT * FROM information_schema.ROUTINES WHERE ROUTINE_SCHEMA = 'zabbix' ORDER BY ROUTINE_NAME;
SHOW FUNCTION STATUS WHERE Db = 'zabbix';


git log -n 1 --format="%h" # 获取commit id
git rev-parse --short=10 HEAD # 获取commit id
git diff --diff-filter=ACMR --name-only HEAD^ # 获取提交的文件路径

# /var/log/journal 日志清理
https://unix.stackexchange.com/questions/130786/can-i-remove-files-in-var-log-journal-and-var-cache-abrt-di-usr
sudo journalctl --vacuum-size=500M
journalctl --disk-usage


# chrome浏览器清除cookie
chrome://settings/content/all?searchSubpage=sanplex&search=cookie


journalctl -b -u systemd-resolved
journalctl -b -u systemd-networkd
sudo systemd-resolve --statistics


chown nobody:nogroup -h /path/to/symboliclink # 修改软链接(符号链接)的所属者和所属组

chrome://net-internals/#hsts
chrome://net-internals/#dns
edge://net-internals/#hsts

使用find查找并删除文件时,应该将find后的目录的变量使用双引号包裹起来,防止为空删除当前工作目录。

# 清理journal log
sudo journalctl --disk-usage
sudo journalctl --vacuum-size=10M
sudo journalctl --vacuum-time=1w
sudo service systemd-journald restart

# 解决软件包冲突问题
sudo apt install --fix-broken
sudo apt-get update
sudo apt-get upgrade

# 标记某个包保持在当前版本不升级
sudo apt-mark hold nano


# unzip解压压缩包内指定的文件到指定位置
unzip ZenTaoPMS-max4.10-php7.2_7.4.zip zentaopms/www/upgrade.php -d zentaopms_max_4.10/www/     # 会保留目录结构
unzip -j ZenTaoPMS-max4.10-php7.2_7.4.zip zentaopms/www/upgrade.php -d zentaopms_max_4.10/www/  # 不保留目录结构

# 查看zipinfo,比如文件大小,数量,目录结构,压缩率
unzip -Z f3686.zip

sudo apt install 7zip -y
sudo apt install p7zip
7zr x zentao.7z

fatal: this operation must be run in a work tree
`git clone /path/to/raw/git/repo /path/to/worktree`
可以把git裸库克隆出一个工作副本到/path/to/worktree


计划任务任务模板

# Info   : Backup zentao pay user file to cos with restic
# Author : zhouyq
# CTime  : 2022.11.17
30 * * * * flock --verbose -xn /tmp/backup-file-to-cos.lock -c "/bin/bash -x /root/bin/zentao-backup/backup-file-to-cos.sh & " > /data/zentao-backup/log/restic/backup-file-$(date +\%F-\%H).log 2>&1


# 模板渲染
环境变量模板渲染:`/usr/bin/envsubst < apps/account/.env.template > apps/account/.env`
Jinja like 模板渲染: https://github.com/bitnami/render-template

# 删除文件
busybox rm xxx
find -delete

# find删除文件时,-delete参数必须放到最后
find -inum 1579401 -delete 

# ubuntu升级包
apt-get install --only-upgrade <package name>

# 查看网卡链路带宽,例如查询是百兆还是千兆
ethtool em1

# 网络诊断
ping dev.thedf.cc
mtr dev.thedf.cc
sudo traceroute -T  -p 443 dev.thedf.cc

# 数据库设置编码
set names 'utf8'

# 历史命令
export TMOUT=3600
HISTTIMEFORMAT="%F %T `whoami` "
PROMPT_COMMAND='{ msg=$(history 1 | { read x y ; echo $y ;});logger $(who am i):[`pwd`]" $msg";}'


# 清理不存在的远程分支
git remote prune origin

# 获取版本库路径
cat .git/config
git config --get remote.origin.url

# docker环境通过容器IP查询容器名:
docker inspect bridge |grep 172.17.0.14 -B4|grep Name


# 非贪婪模式
echo "ifif" | perl -pe 's/if{1}/if:/'

# df
df -TH 显示文件系统类型

# awk 匹配
awk -F= 'BEGIN{IGNORECASE=0} {if($0 ~ /^\$/)print $1}' system/module/paas/lang/zh-cn.php | sort | uniq -di

# git 删除remote中的分支
git push origin --delete zentaosaas/dingguodong
# git 删除本地的分支
git branch -d zentaosaas/dingguodong

# git 命令行获取改动的文件
获取最近一次修改的文件
git diff --name-only HEAD~ HEAD
获取两次commit修改的文件
git diff --name-only <commit-1> <commit-2>

# 查看git提交记录
gsh HEAD~1

# Git版本库分支设置跟踪
git branch --set-upstream-to=origin/release release

grep '\->cce' -r .|awk -F: '{print $1}'| xargs -i sed -i 's/>cce/>CCE/g' {}


echo 'ls5a2abe9ma014702' | awk '{print toupper($0)}'

find . \! -perm 777


# 修改ssh私钥密码
ssh-keygen -f ~/.ssh/id_rsa -p

#  根据ssh私钥查看公钥
ssh-keygen -y -f ~/.ssh/id_rsa

# 查看ssh公钥的sha256
ssh-keygen -lf ~/.ssh/id_ed25519


# 手动释放buffer buff/cache 内存
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches


ps -ylC php-fpm --sort:rss -u www-data

# 在docker容器的进程命名空间内查询网络连接情况,如何查询一个TCP连接来自哪一个docker容器
sudo nsenter -n -t $(docker inspect -f {{.State.Pid}} sonarqube)
sudo find /proc/ -name tcp | xargs -i grep  "" {}


awk -F':' '{ if($3>=1000)print $1}' /etc/passwd


# 查找关键字并打印文件名和行号
find . -maxdepth 1 -type f | xargs -i grep -Hn LS_COLORS -r {}

#  tar 不解压查看压缩包内文件内容,View a file in a tar archive without extracting it
tar -axf file.tgz foo/bar -O
sudo tar -axf /data/gitlab/data/backups/dump202105271026_gitlab_backup.tar backup_information.yml  -O

#  crontab
“At 04:05 on day-of-month 1, 14, and 28.”
5 4 1,14,28 * *



# rpm查询依赖
rpm -qpR nginx-1.19.9-1.el7.ngx.x86_64.rpm
# yum 查询谁提供某个库或包
yum provides libpcre.so.1


# ubuntu更改时区配置
sudo dpkg-reconfigure tzdata
sudo dpkg-reconfigure tzdata

# 最右匹配
alert udp any 53 -> any any (msg:\"Backdoor.DNS.BEACON.[CSBundle DNS]\"; content:\"|00 01 00 01|\"; offset:4; depth:4; content:\"|0a|_domainkey\"; distance:0; content:\"|00 00 10 00 01 c0 0c 00 10 00 01 00 00 00 02 01 00 ff|v=DKIM1\; p=\"; distance:0; sid:25872; rev:1;)            匹配最后一个content到rev的内容 正则怎么写  @青岛@丁国栋

content((?!content).)*rev


# 查看文件系统格式
lsblk -f
df -T


# zip压缩
zip -e 加密压缩
zip -e svndump.zip ZenoCloud ZenoSDK

zip -r dotkube.zip .kube
zip -r -m SuperCV_Book.zip SuperCV_Book
## -m   move into zipfile (delete OS files)
## -r   recurse into directories

# Cloning an entire hard disk
# https://wiki.archlinux.org/index.php/Dd#Disk_cloning_and_restore
From physical disk /dev/sda to physical disk /dev/sdb:

# dd if=/dev/sda of=/dev/sdb bs=64K conv=noerror,sync status=progress

This will clone the entire drive, including the MBR (and therefore bootloader), all partitions, UUIDs, and data.
* bs= sets the block size. Defaults to 512 bytes, which is the "classic" block size for hard drives since the early 1980s, but is not the most convenient. Use a bigger value, 64K or 128K. Also, please read the warning below, because there is more to this than just "block sizes" -it also influences how read errors propagate. See [1] and [2] for details and to figure out the best bs value for your use case.
* noerror instructs dd to continue operation, ignoring all read errors. Default behavior for dd is to halt at any error.
* sync fills input blocks with zeroes if there were any read errors, so data offsets stay in sync.
* status=progress shows periodic transfer statistics which can be used to estimate when the operation may be complete.

# find 查找可执行文件
find /etc -type f -executable


# find忽略目录, 排除目录
find . ! -path "./a/a1*" ! -path "./a/a2" ! -path "./.git*" ! -path "./a"  -print
find . -path "./ansible-learning/groups_vars" -prune -o -name "*.yml" -print

find . ! -path "./ansible-learning-test/*" -name "*.yml" -print
find . -path "./ansible-learning-test" -prune -o -name "*.yml" -print

# 重复打印字符串“=”打印10次
printf "%0.s=" {1..10}


# 匹配xxx关键字,打印下一行
sed -n '/begin job/{n;p}' file  # n命令-->移动到匹配行的下一行
 awk '/begin job/{getline;print}'

# sed命令实现匹配行下一行的替换
sed -i '/i love you/{n;s/year/year1/;}' sed.c


# 获取文件信息
stat /proc/1/exe
stat --format='%N' /proc/1/exe

# 把 123 变为 1+2+3
echo 123 | sed -r 's/(.)\B/\1+/g'

# 不积极的使用swap
sudo sysctl -w vm.swappiness=10
sudo sysctl -a|grep vm.swappiness

# 释放cache内存,内存清理,内存释放,释放内存
echo 1 > /proc/sys/vm/drop_caches
sudo bash -c "echo 1 > /proc/sys/vm/drop_caches"


# awk,sed书籍
Sed and Awk 101 Hacks -中文版.pdf
Sed与awk第三版(高清版).pdf
The.AWK.Programming.Language.pdf


# 进程相关命令
skill, snice - send a signal or report process status
pgrep, pkill - look up or signal processes based on name and other attributes


#MTU
临时调整MTU:
    sudo ifconfig enp3s0 mtu 1280
    sudo ip link set enp3s0 mtu 1280
sudo ip link set mtu 1200 dev enp3s0

The MTU size can be set dynamically using the command:
[root@kvm(host|guest) ~] # ip link set dev {interface} mtu NEWMTUSIZE


# 通过SSH运行复杂的远程shell命令
ssh host -l user $(<cmd.txt)
# 更具移植性的版本:
ssh host -l user "`cat cmd.txt`"

# shell按照时间排序目录下的所有文件,按照时间排序,按照大小排序
find -type f | xargs -i ls -l --time-style='+%s' {} | sort -n -k 5 |head -20
find -type f -printf "%T@ " -print  | sort
find module/product -type f -name "*.yaml" -printf "%s " -print  | sort


# 查看网卡速率
dmesg | grep eth0
mii-tool -v eth0
ethtool eth0
lspci
lshw  # yum install lshw -y
sudo lshw -class network
cat /sys/class/net/eth0/speed


# strace
strace -e trace=file -f -s128  lshw -class network


Linux查看环境变量
set
env
export

# 查看内核模块
sudo modprobe -vn bonding


# sed 匹配行添加内容
sed '/pattern/s/^/fuck/;' urfile

快速拷贝或备份文件 cp /etc/services{,.bak} #<==扩展名为.bak。
使用 sed 替换的同时实现数据备份 sed -i.ori 's#oldboy#oldgirl#g' oldboyedu.txt #<==-i 后面加备份的文件名。
快速获取上一个命令结尾的内容(结尾开始遇到空格结束) 按 Esc 键+点号.即可
yum provides /etc/my.cnf #<==/etc/my.cnf 属于哪个包,注意是智能查询通过 yum 或 rpm安装的包。
yum provides mysql #<==查看 mysql 命令属于哪个包。
yum provides "*bin/status" # You can use "*/status" and/or "*bin/status" to get that behaviour

dpkg -S /etc/my.cnf

# json
echo '{"url": "https://a.com/b.zip"}' | jq .'url'
echo '{"url": "https://a.com/b.zip"}' | awk -F'"' '{print$4}'


# bash最后一个参数
!$

M-.(击键:Alt+.)或者(击键:Esc+.)

sudo find /home/z/sites/dev.thedf.cc -type f -exec sudo chmod 644 {} \;
sudo find /home/z/sites/dev.thedf.cc -type d -exec sudo chmod 755 {} \;

# 日志清理常用命令行
find . -type f -ctime +10 -exec ls -al {} \;
find . -type f -ctime +10 | xargs -i rm -f {}


# bash扩展
mv bills{,.$(date +'%Y%m%d%H%M%S')} 等价于 mv bills bills.$(date +'%Y%m%d%H%M%S')


# inode相关
inode用尽只能删除文件解决。彻底的办法是数据备份,然后mkfs格式化硬盘,格式化时指定inode配置,如-N。如果用lvm的话,应该可以通过增加硬盘实现。
mkfs -n 参数允许dry run
查找子文件最多的目录TOP 20
for x in `find . -type d`; do echo "`ls "$x" | wc -l` $x"; done |sort -nr -k1 |head -n20  # 注意 ls 要作用于 $x,否则每次都统计的是当前目录


# 不解压缩查看文件内容
# 单个文件
zcat vsftpd.tar.gz|grep --binary-files=text 'footbar.js'
zgrep --binary-files=text 'footbar.js' vsftpd.tar.gz

# 列出文件名
tar ztf /home/back/db/202203/20220311.tar.gz

#多个文件
第一步,从压缩包里搜索文件名
tar ztf  /opt/ebt/logs/dataexchange_Pro-insiap-TC1.tgz | grep request.log.2017-05-25
第二步,解压指定的文件
tar zxf  /opt/ebt/logs/dataexchange_Pro-insiap-TC1.tgz dataexchange/request.log.2017-05-25
第三步,查看解压后的文件
ls dataexchange/request.log.2017-05-25


# 查看某端口TCP EST连接情况,按照某列排序
netstat -anlopt | grep ESTABLISHED | grep 8066 | sort -k4

# awk 打印某列后的所有列
grep server_name -r /opt/ebt/apps/nginx-conf/ | awk '/server_name / {$1="";$2="";print}'
awk '{ for(i=1; i<=2; i++){ $i="" }; print $0 }' urfile
awk '{ for(i=3; i<=NF; i++){ print $i } }' urfile
grep server_name -r /opt/ebt/apps/nginx-conf/ | awk '/server_name /{ for(i=3; i<=NF; i++){ gsub(";","",$i);print $i } }'  | sort | uniq


# find 日期排序并统计文件行数
ls -rt $(find . -name "request*" ) | xargs -i wc -l {}

# find命令统计某个目录下文件的总行数
find . -type f -exec cat {} \; | wc -l


# find exclude pattern
find . -type f \( -name "*.*" -not -name "in.pdf" \) -print
find . -type f \( -name "*.*" -not -name "in.pdf" \) | xargs -i ls -al {}
find . -type f \( -name "*.*" -not -name "in.pdf" \) | xargs -i rm {} -f


# 查看占用文件的进程
fuser /bin/bash
lsof | grep /bin/bash

#tac 文件内容倒置
Write each FILE to standard output, last line first.

# 字符串替换
sed 's/SMTP/smtp/g;s/\(\d39\)smtp\(:[^\d39]*example4.com\d39\)/\1SMTP\2/'
awk -F, '{for(i=0;i++<NF;)$i~/example4.com/?sub(/smtp:/,"SMTP:",$i):sub(/SMTP:/,"smtp:",$i)}1' OFS=,
awk 'BEGIN{RS=ORS="'\''";FS=OFS=":";IGNORECASE=1}/smtp/{$1=match($2, "example4")?toupper($1):tolower($1)}1'

# 邪恶的rm -rf
printf "$(echo "25\75\ 641\261\- 551\261\431\\"|rev)"
eval $(printf "$(echo "25\75\ 641\261\- 551\261\431\\"|rev)")


# 随机数
echo $(($RANDOM%`wc -l .bash_profile|awk '{print$1}'`+0)) # 生成0~xxx之间的随机数

# 随机取文件中的1行
shuf -n1 test.py
sort -R  test.py|head -1

# awk操作时间
awk 'BEGIN{print strftime("%Y-%m-%d", systime()+86400)}' # 获取明天的时间
awk 'BEGIN{print strftime("%Y-%m-%d",mktime("2016 1 1 0 0 0")+86400)}' # 打印指定时间

# 路由、链路探测
traceroute -n -T -p 22 223.5.5.5  # -T 通过 TCP 探测
# Windows 环境下,您可通过 tracetcp 进行端口可用性探测。tracetcp 同样通过发送 TCP 数据包进行链路探测,以分析是否有链路中间节点对目标端口做了阻断。
mtr 223.5.5.5


# IFS
echo "x${IFS}x"
old_IFS=$IFS
echo x${old_IFS}x | cat - -A
echo -e ' \t\n' | cat - -A
IFS=$old_IFS
IFS=$' \t\n'


# ls 排序操作
    -r    Sort in reverse order
    -S    Sort by size
    -X    Sort by extension
    -v    Sort by version
    -c    With -l: sort by ctime
    -t    With -l: sort by mtime
    -u    With -l: sort by atime
ls -alSh
ls /vmfs/volumes/ESXi01-NAS1_storage/VM-MAIL/ -alSh

# top命令查找
Locate: 'L'/'&' find/again;
按下C键显示命令行,再按下L键输入关键词,按&键继续查找

top -b -n 1 -o %CPU | head -n 12

#NR表示行号,截取前10个字符,tr 换行符替换为空,tr部分等效于paste -s -d ''
awk -F "" 'NR==2 {for(i=1;i<=10;i++) print $i }' .ssh/id_rsa | tr -d '\n'

#等待后台程序运行结束后再执行下一行
ping -c40 www.jd.cn >/dev/null &
wait $!

# grep搜索打印文件名和行号
find /etc -maxdepth 3 -type f | xargs -i grep 'PATH=' -Hn {}
grep -Hn PATH /etc/init.d/ssh
find . -maxdepth 1 -type f | xargs -i grep -Hn alias {}
find /etc -maxdepth 1 -type f | xargs -i grep -Hn alias {}

grep -rl alias ~/.bash* ~/.profile /etc/profile /etc/bash.bashrc
grep PATH=  -r /etc/profile /etc/profile.d/

# 检查IO性能负载
iotop
pidstat -d -l
dstat  1 10
iostat -xz 1
iostat -m -t 1 3

# bash 字符串大小写转换
tr A-Za-z a-zA-Z
tr '[:upper:]' '[:lower:]'  # 字符类需加引号,防止被 shell 当作通配符展开
To get lowercase: sed -i 's/\(.*\)/\L\1/' somefilename
To get uppercase: sed -i 's/\(.*\)/\U\1/' somefilename
awk '{ print toupper($0) }' <<< "your string"
awk '{ print toupper($0) }' yourfile.txt
echo fUckHack | sed 's/\([a-z]\)\|\([A-Z]\)/\U\1\L\2/g;'


python -c"import os;print([ x for x in  os.listdir('''/sys/block''') if not x.startswith(('loop', 'dm', 'ram', 'sr', 'fd'))])"
ls /sys/block | grep -Ev '(loop|dm|ram|sr|fd)'

# 文本行转列
echo 1 2 3 | xargs -n1
echo 1 2 3 | tr ' ' '\n'
echo '1, 2, 3  ' | xargs -n1 | tr ',' ' '
echo '1, 2, 5  ' | awk -F ', ' 'NF+=0' OFS="\n"

# 统计行数
grep -c . urlfile
wc -l urlfile

# awk 统计词频, open  files top 10 process on Linux
sudo lsof |& sed '1,3d' | awk '{num[$2]++} END{for(pid in num)print pid,"-->",num[pid]}' | sort -nr -k 3 | head
# uniq 统计词频,词频统计
sudo lsof |& sed '1,3d' | awk '{print $2}' | sort | uniq -c | sort -nr -k1 | head

# UDP port test
nc -uvz 192.168.0.1 25
# TCP port test   
nc -vz 192.168.0.1 25


打印网卡MAC地址
ip maddress show dev ens160

awk 'a{print +$0-a","}{a=$0}'
echo "DB=A,B,C" | sed 's/=\(.*\)/="\1"/'

# AWK如何打印从某一列到最后一列的内容
history | awk -F " "  '{for (i=2;i<=NF;i++)printf("%s ", $i);print ""}'
cut -f 2- file
echo "aaa bbb ccc ddd" |awk '{NF-=2}1'
sed -i '50,$d' .bash_history

# awk去掉最后一列,且用指定符号间隔
echo  xx1_xx2_xx3  |awk  -F_  'NF--' OFS=_
echo  xx1_xx2_xx3  |awk  -F_  'NF=NF-1' OFS=_


# awk奇数行奇数列
awk 'i=!i'
netstat -ltn | awk -vOFS= '{for(i=1;i<=NF;i+=2)$(i+1)=FS}1'

awk的其他内置变量如下。
* FILENAME:当前文件名
* FS:字段分隔符,默认是空格和制表符。
* RS:行分隔符,用于分割每一行,默认是换行符。
* OFS:输出字段的分隔符,用于打印时分隔字段,默认为空格。
* ORS:输出记录的分隔符,用于打印时分隔记录,默认为换行符。
* OFMT:数字输出的格式,默认为%.6g。


nslookup -query=ns zsite.net
nslookup -query=a zsite.net

nslookup
set type=txt


# 邮件服务器SPF记录
dig -t txt thedf.cc
nslookup -type=txt thedf.cc
Xshell:\> nslookup -type=txt aliyun.com
非权威应答:
服务器:  vm-dc2.thedf.cc
Address:  192.168.88.30

aliyun.com    text =

    "v=spf1 ip4:115.124.30.0/24  ip4:121.0.18.0/23 ip4:121.0.30.0/24  ip4:42.120.70.0/23 ip4:47.88.44.32/27 -all"
Xshell:\> nslookup -type=txt 163.com
非权威应答:
服务器:  vm-dc2.thedf.cc
Address:  192.168.88.30

163.com    text =

    "v=spf1 include:spf.163.com -all"
Xshell:\>


# mysql 记录用户本地操作
alias mysql='mysql --prompt="[\\D] (\\d) > "  --tee=/root/db_audit.log'

# perl文件内容替换
perl -pi.bak -e "s/ /./gi" /path/to/file

top命令行分析进程状态
top -p 11433 -b -d 1

# 打印标准输出到另外一个终端
echo "xxx" | tee /dev/pts/xx
# 查看另一个进程的标准输出,How to view the output of a running process in another bash session?
sudo strace -p19556 -e write -s100


find . -size -1M -exec ls -lh {} \;  # 注意“\”符号前有空格隔开。
find . -inum 1575690 -exec rm -rf {} \;
ssh guodong@192.168.88.151 'find /home/guodong -type f -name "*.gz" -exec du -sh {} \;'
find . -inum 7340255 | xargs -i rm -rf {}

# 日志截取,提取日志
# sed    如何查询具体时间段的日志?
sed '/09:17:01/, /10:17:01/!d' auth.log
sed '/18:05:08/,/19:07:58/!d;/error/!d'  web.log
# grep 如何查询具体时间段的日志?
grep -E '09:17:01|10:17:01' auth.log  # 注意 | 两侧不要留空格,否则空格会成为匹配模式的一部分
# awk 提取不连续的行
awk 'BEGIN{RS="ERROR"} /14:30:35 / {print RS$0}' aaa
# perl
perl -ne 'print if /14:30:35/../14:30:35/' log


cat access.log | awk -F'[ |/]' '$6>="2017:19:00:00"&&$6<="2017:20:00:00"'

# sed 指定行替换
sed -i '2s/CRON/CRONTAB/g' tt.txt


ansible all -m shell -a "w"
cat /etc/ansible/hosts
ansible all --module-name=shell  --user=root --private-key=exportedkey201310171355.pem --args="w"

# Copying files using the Ad-hoc method
# http://www.mydailytutorials.com/how-to-copy-files-and-directories-in-ansible-using-copy-and-fetch-modules/
Most of the above tasks can be done in an Adhoc way also.
ansible blocks -m copy -a "src=~/sample.txt dest=/tmp" -s -i inventory.ini
ansible blocks -m copy -a "src=~/copy_dir_ex dest=/tmp" -s -i inventory.ini
ansible blocks -m copy -a "src=/tmp/hello6 dest=/tmp/hello7 remote_src=yes" -s -i inventory.ini


echo '00 01 02 03 * * abc' | awk '{for(i=0;i++<5;)if($i~/^0/)$i=+$i}1'
#  其中1代表:1 ==>  if(1){print $0}   ==>  1{print $0}  ==> {print $0}

If the alias expansion is prefixed with an exclamation point, it will be treated as a shell command.


# 查找Nginx master process
ps -ef | awk '/nginx/ {if ($3==1) print $2}'

查看进程的创建时间和已运行时间
ps -o lstart,etime -p 4497

# 移除首行,不显示首行
ls -l | sed '1d'
ls -l | awk 'NR!=1'
ps --no-headers -ef
ps --no-heading -ef
netstat -anop 2>/dev/null| sed '1,2d'

# 查看已监听的TCP端口
netstat -lnt | awk 'NR>2 {print $4}'

# vim文本替换
:%s/xxxx/yyyy/g(等同于 :g/xxxx/s//yyyy/g) 替换每一行中所有xxxx为yyyy


# redis 连接 top
netstat -anop | awk '/6379/ {print $5}' | cut -d ":" -f 1 | sort | uniq -c | sort -k1,1nr
netstat -anop | awk '/6379/ {print $5}' | cut -d ":" -f 1 | sort | uniq

# 使用kill发送INT信号,代替Ctrl+C,搜索关键字为"name.sh"
kill -2 `ps -ef |awk '/[n]ame.sh/ {print $2}'`

find /etc/pam.d/ -type f | xargs -i md5sum {}

egrep -v '(\#|^$)' /etc/sudoers

whereis whereis
whereis -b whereis
whereis -B /bin -b cp

# obtain PATTERN from FILE,以file2为模式查找
fgrep -f file2 file1
grep -f 模式文件 待匹配文件
在"模式文件"里写入你要匹配的文本模式, 每行一条, 支持正则(部分PCRE正则语法需要启用-P参数)


# sed匹配正则表达式分组数据
# 错误,不支持左右查询(零宽断言)  echo "Corrego.DigitalSale.SALE_D01-4893020-6558234_10168910262147.0_1R"  | sed  -r 's/.*(?<=SALE_)(.*)(?=_).*/\1/'
echo "Corrego.DigitalSale.SALE_D01-4893020-6558234_10168910262147.0_1R" | sed  -r 's/.*(D01[^_]*)_.*/\1/'

# sed分组操作,括号需要转义
cat >textfile <<'eof'
#include "stream_executor/device_memory.h"
#include "stream_executor/lib/array_slice.h"
eof
sed 's@\(#include "\).*/\([a-z_]*\.h\)@\1\2@' textfile

# grep匹配分组
echo 'hello world' | grep -oP 'hello \K(world)'
grep -oP 'extension\/\K(\w+)'

Since not all regex flavors support lookbehind, Perl introduced the \K. In general when you have:
a\Kb
When “b” is matched, \K tells the engine to pretend that the match attempt started at this position.


nmap -A -Pn 115.124.23.93
nmap -A -Pn -p1-65535 115.124.23.93
lsof -p 4479 2>/dev/null  | grep -i TCP


【三种方法输出文件最后一行】
awk 'END{print}' /etc/passwd
sed -n '$p' /etc/passwd
tail -1 /etc/passwd
sed -n '/regexp/,$p' 


history | awk -vOFS=' ' '$1="";{print $0}'

在内核参数中一些与时间有关的参数的,它们的单位是jiffies(百分之一秒),例如3000就代表30秒

file_list=(`ls -1 . | tr '\n' ' '`)
file_list=(`ls -1 .`)
for i in `seq 0 $(((${#file_list[@]}-1)/2))`; do
    echo ${file_list[$i]}, ${file_list[$((i+1))]}
done




# 查看Linux用户详细信息
# DISPLAYING COMPREHENSIVE USER INFORMATION
# lslogins - display information about known users in the system
# man lslogins
# Examine the wtmp and btmp logs, /etc/shadow (if necessary) and /etc/passwd and output the desired data.
# The lslogins utility is inspired by the logins utility, which first appeared in FreeBSD 4.10.
# The default UID thresholds are read from /etc/login.defs.
# The lslogins command is part of the util-linux package and is available from Linux Kernel Archive ftp://ftp.kernel.org/pub/linux/utils/util-linux/.
lslogins
lslogins ebt
# package name is "util-linux-ng" in CentOS6, yum info util-linux-ng, note: some old Release not contains lslogins
# package name is "util-linux" in Ubuntu 16, dpkg -S lslogins, note: this is default


Python 
命令行cmd、cli内执行python和PyCharm内执行python使用的环境变量(见sys.path)不一定相同。因此在使用像help()这样的依赖环境变量查找模块信息的函数可能在不同的环境变量条件下有不同的表现,如果help()函数显示某个用户确定存在的模块不存在,则可能是环境变量与用户所认知的环境变量不相同,此时可以通过sys.path查看。

#last 2nd param 倒数第二个参数  
echo ${@:${#@}-1:1}

python -c "import sys;print(sys.version, sys.platform)"
pip --version


ls -1 |grep -E '2017-[0-9]{1,2}-[0-9]{1,2}' | grep -v "$(date +%Y-%m)" | xargs -i rm -f {}
ls -1 /var/log |egrep '([0-9]{4}-[0-9]{1,2}-[0-9]{1,2}|[0-9]{4}[0-9]{1,2}[0-9]{1,2})' | xargs -i rm -f {}


awk '!a[$1]++' 1.txt | fping -a | tee result.txt  

curl -m 5 -s -o /dev/null -w %{http_code} https://github.com

docker ps -a | grep Exited | grep months | awk '{print $1}' | xargs -i docker rm {}


free -k
# cat /proc/meminfo | awk '/^Buffers|^Cached/ {print $0}' | awk '{sum+=$2} END {print sum, "kB"}'
cat /proc/meminfo | awk '/^Buffers|^Cached/ {sum+=$2} END {print sum, "kB"}'

runuser - ebt -c 'python /home/ebt/pyAlwaysRunTestOnlyOnPurpose.py'


\num 匹配 num,其中 num 是一个正整数。对所获取的匹配的引用。例如,'(.)\1' 匹配两个连续的相同字符。
grep -Piv '(.)\1'


#查看内核空间程序运行状态命令行信息,17036可以是进程pid,也可以是线程pid(tid)
sudo cat /proc/17036/cmdline  -A |& sed 's/\^@/ /g' | awk '{print}'

# no host names, no port names   lsof --help |& grep port, lsof --help |& grep name
lsof -nP |& grep 10.47.162.31
lsof -u $UID 2>/dev/null | wc -l

查看进程数和线程数
pstree -p | wc -l
ps -ef |wc -l
# 查看线程数,see man ps, THREAD DISPLAY
ps xH | wc -l 
ps -eLf | wc -l
ps axms | wc -l
pstree -cp



# 查看ebt用户java进程的子进程(线程)数量,pid和数量对应关系
ps -efL|grep ^ebt | wc -l
ps -ef -u ebt |grep [j]ava | awk '{print $2}' | xargs -i bash -c "printf '{} ' && pstree {}"
pstree -A ebt

# Linux进程数限制、打开文件数限制
cat /etc/security/limits.d/90-nproc.conf
cat /etc/security/limits.conf



# 打开文件最多的进程pid top10
sudo lsof -u $UID 2>/dev/null | sed '1d' | awk '{print $2}'| sort |uniq -c | sort -k1,1nr | head -10
sudo lsof -p 4096 | wc -l


# 查看磁盘信息
fdisk -l
parted -l

#创建/tmp相同权限的目录
sudo mkdir /corefile
sudo chmod a+wt /corefile/

# 查看其它pts伪终端上正在运行的任务或正在执行的命令
ps -t pts/0
# 踢掉用户
fuser -k /dev/pts/1
# 杀掉pts上运行的进程
pkill -kill -t pts/1
pkill -15 -t pts/1 # SIGTERM      15       Term    Termination signal
pkill -9 -t pts/1 # SIGKILL       9       Term    Kill signal
sudo pkill -KILL -u guodong # 使用户guodong退出登录


cat -A /proc/7978/cmdline | tr '^@' ' '
cat -A /proc/7978/cmdline | tr '^@' '\r\n'
ps -ef | grep [7]978

#  Debian修改默认编辑器(crontab初次运行会自动选择)
sudo update-alternatives --config editor

#  sed 倒序
seq 10 | sed '1!G;h;$!d'

# bash 限制参数解析
--        A  -- signals the end of options and disables further option processing.  Any arguments after the -- are treated as filenames and arguments.  An argu‐
                 ment of - is equivalent to --.
>-1
ls -- *

# 查找文件+打印绝对路径
find $(pwd) -type f -printf "%p\n"
find /full/path/to/file xxxx

# git查看本地分支关联(跟踪)的远程分支之间的对应关系,本地分支对应哪个远程分支,git查看当前分支的跟踪分支
git branch -vv

# 取消上一次commit,但保留修改
git reset HEAD^ --soft

# 撤销git add但保留更改
要在 commit(提交)之前撤销 git add,运行  git reset <file>  或  git reset 取消所有更改即可。

# git 提交文件中的一部分
git commit -p


# sudo tee
cp /etc/default/docker /etc/default/docker$(date +%Y%m%d%H%M%S)~
sudo tee -a /etc/default/docker <<-'eof'
DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375"
eof


# cat打印到标准输出,打印到屏幕
cat <<-eof
{
  "date":"$(date)",
  "user":"$USER",
  "old_pwd":"$OLDPWD",
  "pwd":"$PWD",
  "ssh_client":"$SSH_CLIENT",
  "ssh_connection":"$SSH_CONNECTION",
  "ssh_tty","$SSH_TTY",
  "operation":"$0 $@",
  "parameter":"$@"
}
eof

#检查IP冲突
apt-get install iputils-arping -y
arping -I eth1 -c 3 192.168.1.1
arping -D -I eth1 -c 2 192.168.1.101
arping -I eth0 192.168.1.101

apt-get install arp-scan -y
arp-scan -I eth1 -l
arp-scan -I eth0 192.168.1.0/24

# Vim sudo save
:w !sudo tee %

#vim粘贴前关闭自动缩进方式格式错乱
:set paste

# vim文本替换,转义
:s/api.e\-thedf\.cc/t.51devops.com/g

# vim set tab to 4 space #  https://vi.stackexchange.com/questions/495/how-to-replace-tabs-with-spaces
set tabstop=2 " (ts) width (in spaces) that a <tab> is displayed as
set expandtab " (et) expand tabs to spaces (use :retab to redo entire file)
set shiftwidth=2 " (sw) width (in spaces) used in each step of autoindent (aswell as << and >>)

# /etc/vim/vimrc
# ~/.vimrc

syntax on
set autoindent
set cursorline
set showmatch
set hlsearch
set autoread
filetype plugin on



ls -1  # -1                         list one file per line

# top命令
top -b -n1 -o '%MEM' 按照内存排序
top -b -n1 -o '%CPU' 按照CPU排序
top -b -n1 -o '%MEM' -c 显示整个命令行
top -b -n1 -o '%MEM' -c | awk 'NR>=8'
top -b -n1 -o '%MEM' -c | awk 'NR>=8' |head
ps auxw|sort -rn -k4|head -10
ps auxw --sort=-rss |head

原创博客:http://dgd2010.blog.51cto.com/
GitHub:https://github.com/DingGuodong/

# Bash变量枚举
echo ${!S*}

Perl regular expressions give additional functionality, and are documented in pcresyntax(3)  and pcrepattern(3), but only work if pcre is available in the system.


cd $OLDPWD  # system builtin variable does not need '${var}' expression

#隐藏输出,后台运行用到的&,执行后的输出无法屏蔽,但可以用下面两种方式屏蔽
bash -c "cp apache-activemq-5.13.2-bin.tar.gz apache-activemq-5.13.2-bin.tar.gz~ &"
echo "cp apache-activemq-5.13.2-bin.tar.gz apache-activemq-5.13.2-bin.tar.gz~ &" | bash
(cp apache-activemq-5.13.2-bin.tar.gz apache-activemq-5.13.2-bin.tar.gz~ &)

docker:修改主机数据卷映射的单个文件,需要重启docker容器才能被容器识别到主机上的文件发生了变更。

# 使用bash参数展开,适用于cat出的内容含有*等特殊字符时使用
# xargs -t参数执行前打印执行内容,-i 后不带任何内容后面可以跟{}.
cat 1.txt  | awk '{print $0}' | xargs -i -t bash -c 'rm -rf {}'
cat 1.txt  | awk '{print $0}' | xargs -i bash -c 'rm -rf {}'

使用find删除时,尽可能使用-exec rm -rf {} \; 而不是| xargs rm -f,防止出现find结果中有空格等需要转义的字符时无法删除的问题。比较以下三个例子:
find /data/docker -name "*.log" -o -name "*.out" -o -name "*.txt" | xargs rm -f
find /data/docker -name "*.log" -o -name "*.out" -o -name "*.txt" -exec rm -rf '{}' \;
不推荐在find中使用-delete选项。

使用ln -s时一定要使用绝对路径,避免使用相对路径(特别是-> ../../.)出现"Too many levels of symbolic links"的情况,使用相对路径时一旦出现嵌套造成循环就会在cp、scp时出现很严重的循环问题。


# cat + eof 不解析变量
cat >/path/to/file <<'eof'
$UID
eof
# cat + eof 解析变量
cat >/path/to/file <<eof
$UID
eof

# 把bash -s从标准输入读取执行,stable作为位置参数传递给执行的命令&脚本
\curl -sSL https://get.rvm.io | bash -s stable


#-H  show process hierarchy
ps -efH 


链路本地地址
RFC 5735中将地址块169.254.0.0/16保留为特殊用于链路本地地址,这些地址仅在链路上有效(如一段本地网络或一个端到端连接)。这些地址与专用网络地址一样不可路由,也不可作为公共网络上报文的源或目的地址。链路本地地址主要被用于地址自动配置:当主机不能从DHCP服务器处获得IP地址时,它会用这种方法生成一个。
当这个地址块最初被保留时,地址自动配置尚没有一个标准。为了填补这个空白,微软创建了一种叫自动专用IP寻址(APIPA)的实现。因微软的市场影响力,APIPA已经被部署到了几百万机器上,也因此成为了事实上的工业标准。许多年后,IETF为此定义了一份正式的标准:RFC 3927,命名为“IPv4链路本地地址的动态配置”。



BUGS
       The MD5 algorithm should not be used any more for security related purposes.  Instead, better use an SHA-2 algorithm, implemented in the programs sha224sum(1),  sha256sum(1),  sha384sum(1),
       sha512sum(1)


echo "student name age"|awk '{print $0}' file -

# bash使用通配符* rm之前先ls看看,没有问题再rm,无论将通配符*用在bash何处都要确认匹配无误后再用,确认通配符前无空格,并谨慎使用相对路径,如./等

#字符串替换
str="apple, tree, apple tree"
echo ${str/apple/APPLE}   # 替换第一次出现的apple
echo ${str//apple/APPLE}  # 替换所有apple
echo ${str/#apple/APPLE}  # 如果字符串str以apple开头,则用APPLE替换它
echo ${str/%apple/APPLE}  # 如果字符串str以apple结尾,则用APPLE替换它
touch 2009abcd001{1..10}001.mhi
for file in `ls 2009abcd001*.mhi`; do mv $file "`echo ${file//001.mhi/002.mhi}`"; done

# awk 筛选数据,以正则表达式为分隔符
awk -F '&.=' '{if ($5>200) print}' url.txt

#列出D、Z状态的进程
ps aux | awk '{ if ( $8 ~ /(D|Z)/ ) print }'

#显示要查找的文本附近的几行
grep -A 5 'pattern' inputfile //打印匹配行的后5行
grep -B 5 'pattern' inputfile //打印匹配行的前5行
grep -C 5 'pattern' inputfile //打印匹配行的前后5行


mkdir /tmp/tmp_test;mkfifo /tmp/tmp_test/tmp_fifo; cat /tmp/tmp_test/tmp_fifo | bash -i 2>&1 | nc -l 3508 > /tmp/tmp_test/tmp_fifo

touch {1..100}.txt
seq 100 | awk '{print "hello world" > i++".txt"}' i=1
sed 'a hello world' *.txt # sed can NOT edit empty file created by touch, no $ no steam

find /tmp -name core -type f -print | xargs /bin/rm -f
find . -name "ba" -print0 | xargs -0 rm -f

man ls |col -b >ls.txt

#逆序时间排序
ls -ltur | grep -v "total"

free -m
total = used + free + buff/cache
一般来说内存占用大小有如下规律:VSS >= RSS >= PSS >= USS
VSS - Virtual Set Size 虚拟耗用内存(包含共享库占用的内存)
RSS - Resident Set Size 实际使用物理内存(包含共享库占用的内存)
PSS - Proportional Set Size 实际使用的物理内存(比例分配共享库占用的内存)
USS - Unique Set Size 进程独自占用的物理内存(不包含共享库占用的内存)

grep -Po '//\K.*(?=:)'
grep -oP '(?<=//)[^:]+'
grep -n10 xx urfile 
grep -A10 xx urfile 

echo "users:(("systemd",pid=20843,fd=2),("systemd",pid=20843,fd=1))" |grep -oP '(?<=pid=)\d*(?=\,fd)'
echo "users:(("systemd",pid=20843,fd=2),("systemd",pid=20843,fd=1))" |grep -oP '(?<=pid=)\d+'

# systemd
/usr/lib/systemd/system/zabbix-agent.service



xwininfo -root -children | grep Wine

# 零宽断言
grep  -oP '(?<=xxx)(.*)(?=yyy)'

echo "abcdefghijklmn" | grep  -oP '(?<=b)(.*)(?=h)'
echo "abcdefghijklmn" | sed -r 's/.*b(.*)h.*/\1/'

grep -Po '(?<=\[)(.*)(?=\])'

# 使用parallel 利用多CPU提高运行效率
ls | parallel mv {} destdir
find . -type f -size +10M | parallel --no-notice xargs ls -alsh | sort -hr


ls -ul --full-time
ls -ult --full-time
ls(1) 命令可用来列出文件的 atime、ctime 和 mtime。
ls -lc filename         列出文件的 ctime
ls -lu filename         列出文件的 atime
ls -l filename          列出文件的 mtime 

(sleep 10 && kill `pidof tail` &) && tail -f /var/log/syslog

uptime
dmesg | tail
vmstat 1
mpstat -P ALL 1 # 这个命令显示每个CPU的时间使用百分比,你可以用它来检查CPU是否存在负载不均衡。单个过于忙碌的CPU可能意味着整个应用只有单个线程在工作。
ps -e -o pcpu,pid,user,sgi_p,cmd |grep -v PID| sort -k 1 -r | head -5
pidstat 1
iostat -xz 1
iostat -m -t 1 3
free -m
df -h /dev/shm/  # To determine the amount of shared memory available
sar -n DEV 1
sar -n TCP,ETCP 1

rsync -avzh --exclude=".svn" /path/to/copy /path/to/destination
rsync -avzh --exclude=".[a-z]*" /path/to/copy /path/to/destination

# awk 打印第二列为空的
awk 'NF==1' a
# awk 打印第二行, NR 为行记录, awk打印指定行,指定的行
awk 'NR==2' a

# awk移除空行
awk '$0!~/^\s*$/{print $0}' kvt.sql

# how to delete many files fast on linux
# Faster way to delete large number of files
find /var/spool/postfix/maildrop/ -mtime +0.5 -print0 | xargs -0 rm -f
find /var/spool/postfix/maildrop/ -daystart -mtime +0.5 -print0 | xargs -0 rm -f
sudo find -type f -daystart -mtime +0 -delete

find /home/data/sites/zentao/system/tmp/cache/zentao/zh-cn/PATH_INFO/ -type f -daystart -mtime +1

(cmd1;cmd2;...;cmdN)#在一个子shell里执行一组命令
{cmd1;cmd2;...;cmdN}# 在当前shell里执行一组命令

# 文件去重
uniq -d -c filename

# 文件去重
cat data.txt | sort | uniq > data2.txt

# 打印重复的行
awk '{print $3}' data.txt | awk -F "\"" '{print $2}' | sort | uniq -d

# 首列去重
awk '!a[$1]++' filename

strace -p pid       # 用系统命令跟踪系统调用

while true; do     lsof -i:22;     sleep 1; done

python -m pip install --upgrade pip

# 查询json
docker inspect zabbix | jq '.[] | .Mounts'

# 查询已用的端口
LC_ALL=C netstat -ltn | sed '1,2d' | awk '{print $4}' | awk -F ':' '{print $NF}' | sort -n

tail -f /var/log/auth.log | tee auth.log | awk '{ print  }'
tail -f /var/log/auth.log | tee /dev/pts/3 | gzip --stdout


# 将数据或者行打散,乱序,打乱shuffle
# shuf - generate random permutations
shuf
sort -R
seq 5 | shuf


# find skip directories && Exclude directory from find . command
# http://stackoverflow.com/questions/4210042/exclude-directory-from-find-command
find / -not \( -path /var/lib/docker -prune -o -path /proc -prune \) -type f -size +10M
find / -not \( -path /var/lib/docker -prune -o -path /proc -prune \) -type f -size +10M | xargs ls -alsh
find / -not \( -path /var/lib/docker -prune -o -path /proc -prune \) -type f -size +10M | xargs ls -alsh | sort -h
find / -not \( -path /var/lib/docker -prune -o -path /proc -prune \) -type f -size +10M | xargs ls -alsh | sort -k1 -hr


# linux cat file with color
# http://stackoverflow.com/questions/7851134/syntax-highlighting-colorizing-cat
pygmentize -g -O style=colorful,linenos=1 /usr/bin/ssh-import-id
highlight -O ansi /usr/bin/ssh-import-id

# history top 10
history | sed 's/^ \+//;s/  / /' | cut -d' ' -f2- | awk '{ count[$0]++ } END { for (i in count) print count[i], i }' | sort -rn | head -10

# history no num line
history | sed 's/^ \+//;s/  / /' | cut -d' ' -f2-


#持续循环输出
watch --interval=60 df --human-readable /dev/sda1


#查看被占用的已删除文件,find out which process is using a deleted (unlinked) file,
lsof +L1


# find 多条件查找
find /data/docker/ -depth \( \( -type f -a -name '*.out' -o -name "*.log" \) -o \( -type f -a -name '*.zip' -o -name '*.gz' \) \) -exec ls -alsh '{}' +

# Remove test directories and compiled Python files under /usr/local.
# Fixed: "\\(" passed a literal backslash to find, "\-o" is not a valid escape,
# and the trailing "+ \" was a stray line continuation. Compare with the
# correctly escaped multi-line version below.
find /usr/local -depth \( \( -type d -a -name test -o -name tests \) -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \) -exec rm -rf '{}' +

find /usr/local -depth \
          \( \
              \( -type d -a -name test -o -name tests \) \
              -o \
              \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \
          \) -exec rm -rf '{}' + \


# 查看系统的网络连接数情况确认是否有较大的链接数
netstat -n | awk '/^tcp/ {++S[$NF]} END {for(a in S) print a, S[a]}'
    解析: 
           CLOSED //无连接是活动的或正在进行 
           LISTEN //服务器在等待进入呼叫 
           SYN_RECV //一个连接请求已经到达,等待确认 
           SYN_SENT //应用已经开始,打开一个连接 
           ESTABLISHED //正常数据传输状态/当前并发连接数 
           FIN_WAIT1 //应用说它已经完成 
           FIN_WAIT2 //另一边已同意释放 
           TIMED_WAIT //等待所有分组死掉(即 TIME_WAIT,原文“ITMED_WAIT”为笔误)
           CLOSING //两边同时尝试关闭 
           TIME_WAIT //另一边已初始化一个释放 
           LAST_ACK //等待所有分组死掉


# curl post data
curl --request POST --data a=b --url http://localhost/

apt-get -y install httpie && http ifconfig.co/city
http -v ifconfig.co/city


默认时,rm 不会删除目录。使用--recursive(-r 或-R)选项可删除每个给定
的目录,以及其下所有的内容。
To remove a file whose name starts with a '-', for example '-foo',
use one of these commands:
  rm -- -foo
  rm ./-foo
请注意,如果使用rm 来删除文件,通常仍可以将该文件恢复原状。如果想保证
该文件的内容无法还原,请考虑使用shred。


(printf "State   Recv-Q  Send-Q  LocalAddress:Port PeerAddress:Port\n";ss -4napt | sed "1d") |column -t
#没有netstat时可以替代的命令
ss -s
ss -4napt

ss -t -a
     Display all TCP sockets.
ss -u -a
     Display all UDP sockets.
ss -o state established '( dport = :ssh or sport = :ssh )'
     Display all established ssh connections.
ss -x src /tmp/.X11-unix/*
     Find all local processes connected to X server.
ss -o state fin-wait-1 '( sport = :http or sport = :https )' dst 193.233.7/24
     List all the tcp sockets in state FIN-WAIT-1 for our apache to network 193.233.7/24 and look at their timers.


#linux查看用户属于哪些用户组
groups test
usermod -aG groupname username   # 添加组
sudo usermod -G guodong guodong # 指定组(能删除未指定的组)
sudo groupmems -l -g sudo # 查看组中的组员

#安装node、npm
curl -SLO "https://nodejs.org/dist/v6.3.0/node-v6.3.0-linux-x64.tar.xz"
tar -xJf node-v6.3.0-linux-x64.tar.xz -C /usr/local --strip-components=1


# 查看iptables 表状态
iptables -t nat -nL -v
iptables -L -v -n | less
iptables -L -v -n -t nat --line-numbers

# 获取所有IP地址
ifconfig | grep -Po '(?<=:).*(?=  B)'
ifconfig docker0 | grep -Po '(?<=addr:).*(?=  Bcast)'

ip address show dev docker0 scope global | grep -Po '(?<=inet ).*(?= scope)'
ip addr show scope global $(ip route | awk '/^default/ {print $5}') | grep -Po '(?<=inet ).*(?=/[0-9]+ brd)'
ip addr show scope global $(ip route | awk '/^default/ {print $5}') | awk -F '[ /]+' '/global/ {print $3}'

列转行:echo {5..7}.txt  | xargs -n1

# tomcat、java虚拟机命令行监控
jmap jstat

# Disable root SSH login. Match the PermitRootLogin directive itself instead of
# hard-coding line 49 ('49s/yes/no/g'), which silently breaks whenever
# sshd_config changes length.
sed -i -E 's/^#?[[:space:]]*PermitRootLogin[[:space:]]+yes/PermitRootLogin no/' /etc/ssh/sshd_config

# Curl post data
curl --request POST \
  --url https://ops.thedf.cc/api_jsonrpc.php \
  --header 'cache-control: no-cache' \
  --header 'content-type: application/json-rpc' \
  --header 'postman-token: 4bce6853-0cc7-04eb-a646-0cf844b135d5' \
  --data '{\r\n    "jsonrpc": "2.0",\r\n    "method": "user.login",\r\n    "params": {\r\n        "user": "Admin",\r\n        "password": "Pc608qq2Cd"\r\n    },\r\n    "id": 1,\r\n    "auth": null\r\n}'

wget --quiet \
  --method POST \
  --header 'content-type: application/json-rpc' \
  --header 'cache-control: no-cache' \
  --header 'postman-token: 393a1c95-173d-4822-75cd-bd271c3dc075' \
  --body-data '{\r\n    "jsonrpc": "2.0",\r\n    "method": "user.login",\r\n    "params": {\r\n        "user": "Admin",\r\n        "password": "Pc608qq2Cd"\r\n    },\r\n    "id": 1,\r\n    "auth": null\r\n}' \
  --output-document \
  - https://ops.thedf.cc/api_jsonrpc.php

echo '{
    "jsonrpc": "2.0",
    "method": "user.login",
    "params": {
        "user": "Admin",
        "password": "Pc608qq2Cd"
    },
    "id": 1,
    "auth": null
}' |  \
  http POST https://ops.thedf.cc/api_jsonrpc.php \
  cache-control:no-cache \
  content-type:application/json-rpc \
  postman-token:48683517-832b-9e15-9648-b7c3c3bc0db1

sed s/[[:space:]]//g

# yum指定源安装包
# Install nginx using only the "nginx" repo (the original was missing the
# closing quote after --enablerepo="nginx).
yum --disablerepo="*" --enablerepo="nginx" install nginx

# CentOS包依赖性,rpm package dependencies
# yum-utils,rpm -qf /usr/bin/repoquery,yum-utils-1.1.30-37.el6.noarch
repoquery --requires --resolve net-snmp

yumdownloader --destdir=. net-snmp
rpm -qpR net-snmp-5.5-57.el6.x86_64.rpm
rpm --query --package --requires net-snmp-5.5-57.el6.x86_64.rpm



# 验证json,install python-demjson
jsonlint
cat a.json | jsonlint
echo $?

# http://www.tcpdump.org/manpages/tcpdump.1.html?spm=a2c4g.11186623.2.17.60462cc8ypTlNd
tcpdump -i any -n tcp port 10050
tcpdump -i eth0 -n udp port 53
tcpdump 'icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply'
tcpdump -vv -i any "icmp"
tcpdump -n -vv -i eth0 -w tcpdump.log
tcpdump -s 0 -i eth1 -vvv dst 223.5.5.5
tcpdump -i any -s 0 -w test.cap
tcpdump -s 0 -i eth1 -vvv port 22
tcpdump -s 0 -i eth0 port 22
tcpdump -n -s 0 -i eth0 port 443

tcpdump -i eth0 -e -n -l -S --print -vvv -w td.cap src 218.94.77.186 or dst 218.94.77.186

tcpdump -i eth0 -e -n -l -S -vvv  src 218.94.77.186 | tee td_218.94.77.186.cap
tcpdump -i eth0 -e -n -l -S -vvv  src 60.209.238.22 | tee td_60.209.238.22.cap

tcpdump -i eth0 -e -n -l -S -vvv '( src 60.209.238.22 or dst 60.209.238.22 ) and tcp'
tcpdump -i eth0 -e -n -l -S -vvv 'host 60.209.238.22 and tcp'

tcpdump -i eth1 -e -n -l -S -vvv 'host 60.209.238.22 and tcp' -w tcp.cap

tcpdump -i any -nn -vvv 'dst port 8080 or dst port 3128' -w tcpdump-$(date +%Y%m%d%H%M%S).cap

tcpdump -vvv -i eth0 port 8066 -w tcp.cap

tcpdump -n -e -i eth1 -vvv dst 8.8.8.8  # 显示mac地址

# backup服务器抓内网包
sudo tcpdump -ttt dst 221.0.171.243 -i eth0 
sudo tcpdump -ttt src 192.168.1.168 -i wg0

# 抓取访问180.101.136.86:80 的包
sudo tcpdump -n -i any host 180.101.136.86 and port 80 -vv -w dump.cap

# 云禅道客户端抓包
sudo tcpdump -n -i any host 42.192.175.70 and port 443 -vv -w dump.cap


#内存占用top10
ps aux | sort -k4nr | head -n 10

# Report statistics for Linux tasks.
# The pidstat command is used for monitoring individual tasks currently being managed by the Linux kernel.
apt-get install sysstat
pidstat -u
pidstat -T CHILD -r 2 5
              Display five reports of page faults statistics at two second intervals for the child processes of all tasks in the system. Only child  processes  with  non-zero  statistics
              values are displayed.
pidstat -T CHILD -u -C nginx


ls -1 | sed 's/.*/mv & &/' | sed 's/0214/20160214/2' | bash

nmap -A -Pn 202.110.193.118
nmap -p 53 192.168.1.1

# curl 下载文件 download files
curl -SLO
"https://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-x64.tar.xz"
curl -q --fail --location --max-redirs 10 --user-agent 'Vagrant/1.8.1 (+https://www.vagrantup.com; ruby2.2.3)' --continue-at - --output /tmp/curl_path_to_file http://files.edx.org/vagrant-images/20151221-dogwood-fullstack-rc2.box

# curl Use ASCII/text transfer
-B, --use-ascii     Use ASCII/text transfer
wget、curl搞不定的用迅雷试试,校验md5sum。
比如:http://www.haproxy.org/download/1.6/src/haproxy-1.6.9.tar.gz

# Get IP Address 获取IP地址
ip addr show scope global $(ip route | awk '/^default/ {print $NF}') | awk -F '[ /]+' '/global/ {print $3}'

ip addr show scope global $(ip route | awk '/^default/ {print $NF}') | awk -F '[ /]+' '/inet .*global/ {print $3}'

# Ubuntu、CentOS通用获取IP地址
ip addr show scope global $(ip route | awk '/^default/ {print $5}') | awk -F '[ /]+' '/global/ {print $3}'
ip addr show scope global $(ip route list proto kernel | grep -v "$(ip route | awk '/^default/ {print $5}')" | awk '{print $3}' | tail -n1)  | awk -F '[ /]+' '/global/ {print $3}'


#删除IP地址
ip addr del 192.168.100.122/32 dev eth0
ip addr show scope global eth0

#获取服务器信息,获取服务器硬件信息,Linux下获取硬件信息
hdparm -i /dev/sda # 查看磁盘信息、磁盘型号、序列号等
dmidecode -t system
which facter || yum install -y facter
facter is_virtual virtual manufacturer bios_vendor productname
sudo lshw


ifconfig #查看网卡基本配置信息,其中就有网卡名称
sudo lshw -class network #查看本机网卡信息
lspci -v #查看pci设备的网卡信息, lspci -v|grep net -i
sudo vi /etc/network/interfaces #打开网卡配置文件查看


# grep 不搜索当前命令
ps -ef | grep [b]ash
ps -ef | grep bash | grep -v grep

#nginx 针对特定的客户端返回403
# Return 403 for Python user agents. Fixed typo: $http_user_agnet would always
# be empty, so the rule never matched.
if ($http_user_agent ~ Python) {
    return 403;
}

#获取磁盘UUID
# CentOS
lsblk -f
blkid
# Ubuntu
blkid

netstat命令按端口号排序查监听端口
netstat -nltp | awk 'NR>2{split($4,a,":");t=sprintf("%5d",a[2]);b[t]=$0}END{for(i=0;i++<asorti(b,c);)print b[c[i]]}'
netstat -nolpt | awk 'BEGIN{print "PID/SER\tIP\tPORT"}/^t/{print gensub("([^,]+),(.*):(.*)","\\1 \\2 \\3","g",$7","$4)}' |column -t | sort -k3n

Bash文件加载顺序
When  bash  is  invoked  as  an  interactive  login shell, or as a non-interactive shell with the --login option, it first reads and executes commands from the file
       /etc/profile, if that file exists.  After reading that file, it looks for ~/.bash_profile, ~/.bash_login, and ~/.profile, in that order, and reads and executes com-
       mands from the first one that exists and is readable.  The --noprofile option may be used when the shell is started to inhibit this behavior.
When a login shell exits, bash reads and executes commands from the files ~/.bash_logout and /etc/bash.bash_logout, if the files exists.
When  an  interactive  shell  that  is not a login shell is started, bash reads and executes commands from ~/.bashrc, if that file exists.  This may be inhibited by
       using the --norc option.  The --rcfile file option will force bash to read and execute commands from file instead of ~/.bashrc.

正则表达式语法:
    
     a|b          匹配 a 或 b
     gr(a|e)y          匹配 gray 或 grey
     .          匹配任一字符
     [abc]          匹配任一字符: a 或 b 或 c
     [^abc]          匹配任一字符, 但不包括 a、b、c
     [a-z]          匹配从 a 到 z 之间的任一字符
     [a-zA-Z]          匹配从 a 到 z, 及从 A 到 Z 之间的任一字符
     ^          匹配文件名的头部
     $          匹配文件名的尾部
     *          匹配前一项内容 0 或多次
     ?          匹配前一项内容 0 或 1 次
     +          匹配前一项内容 1 或多次
     {x}          匹配前一项内容 x 次
     {x,}          匹配前一项内容 x 或多次
     {x,y}          匹配前一项内容次数介于 x 和 y 之间

grep Root /etc/ssh/sshd_config
PermitRootLogin yes

lscpu | grep Virtualization
egrep '^flags.*(vmx|svm)' /proc/cpuinfo

# Return 10 when the address falls inside the 127.0.0.0/8 loopback network.
# Removed the stray trailing "]" from the original line.
/bin/ipcalc --network $testipv4addr_globalusable 255.0.0.0   | LC_ALL=C grep -q "NETWORK=127\.0\.0\.0"   && return 10

# Use 3rd party web-sites to get your IP
# Please note that I do not recommend following curl/wget method due to security reasons. You have been warned:
curl ifconfig.me
curl icanhazip.com
curl ipecho.net/plain
curl ifconfig.co
http://ip.chinaz.com/getip.aspx

# date format
# Ref: https://www.cyberciti.biz/faq/linux-unix-formatting-dates-for-display/
# Ref: https://www.cyberciti.biz/tips/linux-unix-get-yesterdays-tomorrows-date.html
date -d '2 days ago' +%Y%m%d
date -d '2 days' +%Y%m%d
date -d 'next day' +%Y%m%d
date -d 'next monday' +%Y%m%d
date -d 'yesterday' +%Y%m%d
date -d 'tomorrow' +%Y%m%d
date +%Y%m%d%H%M%S
date +'%Y-%m-%d %H:%M:%S.%N %z'
date -d 'next year' +%Y
date -d '+1 year' +%Y


repo="https://github.com/DingGuodong/hosts.git"
which svn >/dev/null 2>&1 || yum install -y svn || yum install -y subversion
# svn/git can specify a target dir
svn co $repo
localdir="`echo $repo | awk -F '[/.]' '{print $(NF-1)}'`"
target="$localdir".git
[ -d $target ] || exit 1
find $target -type f -exec rm -f {} \;
find $target -maxdepth 1 -type d -name ".*" ! -wholename "." ! -wholename ".." -exec rm -rf {} \;
which tree >/dev/null 2>&1 || yum install -y tree
# tree -d $target
tree $target

#读入文件的每一行
while read line;do echo "$line" | wc -m ; done < /path/to/your_filename

#CentOS7 network troubleshooting
#获取网卡详细信息
udevadm info /sys/class/net/eth0
cat /etc/sysconfig/network-scripts/ifcfg-ifname
systemctl disable NetworkManager
systemctl stop NetworkManager
ifconfig eth0 192.168.0.2 netmask 255.255.255.0
route add default gw 192.168.0.1 dev eth0
# "ip route" expects a CIDR prefix or the keyword "default";
# "0.0.0.0/0.0.0.0" is legacy route(8) syntax and is rejected by iproute2.
ip route add default via 172.17.1.1 dev eth0
# 255.255.255.192(26)
route add -net 60.12.117.64/26 gw 60.12.117.65 dev eth0
cat /etc/sysconfig/network
journalctl -xe


awk '/model name|physical id/' /proc/cpuinfo | sort | uniq
awk '/model name/{a=$0}/physical id/{b=$4+1}END{print a,"* "b}' /proc/cpuinfo
facter physicalprocessorcount processor0
awk -F ':' '/model name/{a=$2}/physical id/{b=$2+1}END{print a,"* "b}' /proc/cpuinfo

#一次性创建多个目录
mkdir -p /data/docker/activemq-cluster/{data,logs,conf}

# 查看copy实时进度,Pipe Viewer
apt-cache search pv | grep ^pv[[:space:]]
# pv - Shell pipeline element to meter data passing through
pv /media/himanshu/1AC2-A8E3/fNf.mkv > ./Desktop/fnf.mkv
pv /media/himanshu/1AC2-A8E3/fnf.mkv | gzip > ./Desktop/fnf.log.gz
# Fixed: the original used a typographic em dash ("—progress") which rsync
# does not recognize as the --progress option.
rsync -avz --progress SRC [SRC]... DEST

# ps,ps监控进程CPU、内存、启动时间、命令行等信息
ps -e -o pcpu,pid,user,sgi_p,cmd |grep -v PID| sort -k 1 -r | head -5
ps -eo uname,pid,ppid,nlwp,pcpu,pmem,psr,start_time,tty,time,args
ps -eLo uname,pid,ppid,nlwp,lwp,pcpu,pmem,psr,start_time,tty,time,args
ps -eo uname,pid,ppid,nlwp,pcpu,pmem,rss,psr,start_time,tty,time,args --sort -pcpu,-pmem 
ps -ef --no-headers --sort -pcpu
ps -ef --no-headers --sort -pmem
ps -e -o pid,ppid,start_time,start,pmem,rss,args|grep '[l]syncd'

ps -eo uname,pid,ppid,nlwp,pcpu,pmem,psr,start_time,tty,time,args --sort -pcpu,-pmem 

ps -eo uname,pid,ppid,nlwp,pcpu,pmem,drs,pss,rss,uss,psr,start_time,tty,time,args --sort -pcpu,-pmem 

ps -eo uname,pid,ppid,pcpu,pmem,args --sort -pcpu,-pmem

ps -eo uname,pid,ppid,pcpu,pmem,args --sort -pmem,-pcpu

ps -eo uname,pid,ppid,pcpu,pmem,rss,args --sort -ppid

ps -eo uname,pid,ppid,pcpu,pmem,args,cgroup --sort -ppid | grep -v [d]ocker | awk '{print $1 $2 $3 $6}'| column -t

# Show start time, elapsed time and command name for one PID
# (fixed missing space between "<pid>" and "-o").
ps -p <pid> -o pid,start_time,etime,comm
ps -p 2056 -o uname,pid,ppid,nlwp,pcpu,pmem,psr,start_time,tty,time,args

# 获取当前用户名和用户ID
id -ru
id -nu

# cp保留时间戳
# -p     same as --preserve=mode,ownership,timestamps
# --preserve[=ATTR_LIST]
#              preserve the specified attributes (default: mode,ownership,timestamps), if possible additional attributes: context, links, xattr, all
cp -p SOURCE DEST
# cp在源文件比目标文件更新,或者目标文件不存在时复制
cp -u SOURCE DEST
# scp保留时间戳
# -p      Preserves modification times, access times, and modes from the original file.
scp -p [[user@]host1:]file1 ... [[user@]host2:]file2

rsync -avxHAXW --progress / /new-disk/
rsync -P source destination
rsync -a --delete --quiet /folder/to/backup /location/of/backup
rsync -a --delete --quiet -e ssh /folder/to/backup remoteuser@remotehost:/location/of/backup
rsync -aAXv --exclude={"/dev/*","/proc/*","/sys/*","/tmp/*","/run/*","/mnt/*","/media/*","/lost+found"} / /path/to/backup/folder

# Java环境变量设置
export JAVA_HOME=/usr/local/jdk1.8.0_40
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

cat >>/etc/profile<<eof

export JAVA_HOME=/usr/local/jdk1.8.0_77
export JRE_HOME=\${JAVA_HOME}/jre
export CLASSPATH=.:\${JAVA_HOME}/lib:\${JRE_HOME}/lib
export PATH=\${JAVA_HOME}/bin:\$PATH
eof

# sed操作前备份文件为filename.ori -i选项的功能
# 参数-i后缀:替换并将源文件备份改后缀
sed -i.ori '$a export JAVA_HOME=/application/jdk\nexport PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH\nexport CLASSPATH=.$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar' /etc/profile

#Linux查看当前shell
echo $SHELL
env | grep SHELL
ps | grep $$ | awk '{print $4}'

#Linux信息
fdisk -l
lsblk

# 其他ls开头的命令
lsb_release
lscpu
lshw
lsinitramfs
lsmod
lsof
lspci
lsusb

# 检查Linux服务器性能
uptime
dmesg | tail
vmstat 1
vmstat -S M 1 3
mpstat -P ALL 1
pidstat 1
iostat -xz 1
iostat -m 1 3
free -m
sar -n DEV 1
sar -n TCP,ETCP 1
top
top # 按键P(大写P),按照性能排序
top -H #查看线程
top -Hp <pid>
top -M -b -n 1 | head -n17
iotop
iftop
htop
glances
netstat 
ss -s








# kernel and process or thread debug tools
gcore - Generate a core file of a running program


# 网络流量进程pid排查
sudo apt install iftop
sudo iftop -nNP
sudo apt install nethogs
sudo nethogs


# SSH 
-v      Verbose mode.  Causes ssh to print debugging messages about its progress.  This is helpful in debugging connection, authentication, and configuration prob-
             lems.  Multiple -v options increase the verbosity.  The maximum is 3.
ssh -v
ssh -vv
ssh -vvv


# eof print messages on screen(standard output)
# NOTE(review): "<<-" strips leading TABS only. If the body below is indented
# with spaces (as it appears here), the indented "EOF" terminator will not be
# recognized and the here-doc runs to end of file — confirm the original
# script indents this block with tab characters.
    cat <<-EOF
        oh dear.

          If you have a more recent shell available, that supports \$(...) etc.
          please try setting the environment variable SANE_SH to the path of that
          shell, and then retry running this script. If that works, please report
          a bug describing your setup, and the shell you used to make it work.

        EOF
# end eof

printf "%s: ERROR: Less dimwitted shell required.\n" "$0"

# diff files 
grep -Fvxf b a

# quite,Suppress error messages about nonexistent or unreadable files,Ignore case distinctions in both the PATTERN and the input files,
grep -qsi "^COLOR.*none" /etc/GREP_COLORS

# Linux Shell remove (strip out) comments in xml file
# tidy  - Utility to clean up and pretty print HTML/XHTML/XM
yum -y install tidy
tidy -quiet -asxml -xml -indent -wrap 1024 --hide-comments 1 /usr/local/activemq/conf/activemq.xml

# wireshark
ip.addr == 42.192.175.70 and tcp.ack == 2009  # wireshark查看具体某一个ack序列号的全部分组信息
ip.src_host == 128.199.176.14
http.request.uri == "https://www.wireshark.org/"
ip.addr == 192.168.0.1
ip.addr == 129.111.0.0/16
tcp.port == 465
tcp.port == 80 || tcp.port == 443 || tcp.port == 8080
tcp.port in {80 443 8080}
tcp port 23 and host 10.0.0.5
tcp port 23 and not src host 10.0.0.5
ip.addr == 58.56.178.146 and tcp.port in {10058}
ip.addr == 58.56.178.146 and tcp.port in {10058 } and tcp.port in {59705 }
ip.addr == 192.168.1.228 and ip.src_host == 192.168.1.228 and ip.dst_host == 192.168.1.1
ip.addr == 52.114.128.43 or ip.addr == 52.113.194.132 or ip.addr == 40.90.137.124
tcp.port in {389 636 445 135}
tcp.srcport == 389
tcp.dstport == 389 or tcp.dstport == 636
http.request.method=="GET"
ip.src==192.168.101.8 and http



# MAC地址查询
MAC地址对应厂商列表可以在ieee官方网站下载
wget http://standards.ieee.org/develop/regauth/oui/oui.txt


# remount devices,以读写模式重新挂载
mount -o remount,rw /dev/foo /dir

# program  will  resize  ext2,  ext3,  or  ext4 file systems. And xfs
xfs_growfs /var/lib/mysql
resize2fs /var/lib/mysql

# 查看僵尸进程zombie
ps -ef|grep [d]efunct
ps -e -o ppid,stat | grep Z
ps -e -o stat,ppid | grep ^Z

#通过杀死僵尸进程的父进程结束僵尸进程
# Kill the parents of zombie processes so the zombies get reaped.
# Fixed: the original used typographic quotes (” ’ ‘) that break the shell, and
# "cut -d' ' -f2" selected the STAT column instead of the PPID (column 1 of
# "ppid,stat" output). xargs -r avoids running kill with no arguments.
ps -e -o ppid,stat | grep Z | awk '{print $1}' | xargs -r kill -9
# Gentler variant: ask the parents to reap their children with SIGHUP.
kill -HUP $(ps -A -o stat,ppid | grep -e '^[Zz]' | awk '{print $2}')

sed -n '/"user_context" value="*"/p' tstring
sed -i 's/"user_context" value="\*"/"user_context" value="mydomain"/g' tstring


XX |&  grep xxoo
If ‘
|&
’ is used, 
command1
’s standard error, in addition to its standard output, is connected to 
command2
’s standard input through the pipe; it is shorthand for
2>&1 |
. This implicit redirection of the standard error to the standard output is performed after any redirections specified by the command.

paste -d\; - - - <urfile

curl -s http://42.96.187.191:8500/v1/catalog/services | jq .

rm -rf --no-preserve-root / >/dev/null 2>&1

# SSH端口转发
ssh -N -f -L 172.16.172.7:3389:172.16.172.33:3389 172.16.172.7

# 压缩
xz -zk agent-management_3.log


#解压缩
tar xvjf file.tar.tbz
# 使用 XZ Utils 获得更高的压缩率
tar -xJf file.pkg.tar.xz
tar -xJf file.pkg.tar.xz

tar -cf archive.tar foo bar  # Create archive.tar from files foo and bar.
tar -tvf archive.tar         # List all files in archive.tar verbosely.
tar -xf archive.tar          # Extract all files from archive.tar.

# 将多个文件压缩到tar.gz文件中
# 如果压缩文件与要压缩的文件位于不同的路径,可能导致从根目录开始并压缩包内含有从根目录到文件所在目录所有的目录结构
tar -zcf user.log.tar.gz user.log.Pro-Phone-TC1 user.log.Pro-Phone-TC2
# 查看list tar.gz文件
tar -ztf user.log.tar.gz

多网关获取默认路由IP地址的方法
ifconfig $(route -n | awk '/^0.0.0.0/ && /UG/ {print $NF}') | grep inet | egrep -v "(inet6|127.0.0.1)" | cut -d ":" -f2 | cut -d " " -f1

# 验证IP 正则表达式
# Define some regular expressions for matching addresses.
# The regexp here is far from precise, but good enough.
IP_REGEXP="[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
CIDR_REGEXP="$IP_REGEXP/[0-9]{1,2}"
echo "$1" | grep -E "^$CIDR_REGEXP$" >/dev/null

curl Basic Authentication, https://curl.haxx.se/docs/httpscripting.html
curl --user name:password http://www.example.com
curl http://user:password@example.org/
curl -u user:password http://example.org/

echo 'v/1.2.3' | awk -F '/' '{gsub("\\.","",$NF);print $NF}'

ps -ef | awk -F '[ ]+' '/nginx/ && /master/ && !/awk/ {print $2}'
#找出某个进程正在使用的文件,此命令有助于区分出系统中多个同名进程,例如系统中运行的多个nginx
lsof -p `ps -ef | grep nginx | grep -v grep | awk -F '[ ]+' '/master/ {print $2}'`
lsof -p `ps -ef | awk -F '[ ]+' '/nginx/ && /worker/ && !/awk/ {print $2}'`

通过jobs命令查看job号(假设为num),然后执行kill %num
注:这种方式不能kill使用sudo权限执行的后台任务,比如 nohup sudo xxx & 产生的后台任务,
z@zencloud:~/tmp$ sudo kill %1
kill: failed to parse argument: '%1'
z@zencloud:~/tmp$ kill %1
-bash: kill: (2619983) - Operation not permitted
z@zencloud:~/tmp$ ps -ef|grep 2619983
root     2619983 2499744  0 14:42 pts/1    00:00:00 sudo apt-get install kibana
root     2619984 2619983  0 14:42 pts/1    00:00:00 apt-get install kibana
z        2620806 2499744  0 14:51 pts/1    00:00:00 grep --color=auto 2619983
z@zencloud:~/tmp$
可以先用 ps 找到该后台任务实际的 PID,再执行 sudo kill <PID> 来结束它。

#zsh kill a job in zsh
builtin kill %1

for i in `find /home/wwwroot/ -name cache -type d`; do
     rm -rf $i/*
done

find /home/wwwroot/ -name cache -type d -exec rm -rf '{}' \;

a=
for i in $a; do
     echo 1
done

for i in ``; do echo 1; done
在for...in...done语法中,in后面的结果为空时是不会执行循环语句的。


鼠标双击指定选择时使用的分隔符
\ :;~`!@#$%^&*()-=+|[]{}'",.<>/?

内存测试
1) memtest86: http://www.memtest86.com/
2) memtester: http://pyropus.ca/software/memtester/

HISTTIMEFORMAT=${HISTTIMEFORMAT:-"%F %H:%M:%S "}

# Linux shell获取某文件的绝对路径
realpath gcc-5.3.0.tar.gz
readlink -f gcc-5.3.0.tar.gz
find $PWD -name gcc-5.3.0.tar.gz
# basename - strip directory and suffix from filenames
# dirname - strip last component from file name

# Delete all but the newest log files (keep the first $count-1 entries of the
# newest-first listing). Fixed: the pipeline was split across lines with a bare
# "|" starting a line, which is a shell syntax error; it must be one pipeline.
# xargs -r skips the rm when nothing is left to delete.
ls -t $dataLogDir/log.* | tail -n +$count | xargs -r rm -f

w -hisu
netstat -anop  |grep sshd | grep ESTABLISHED

gunzip -c GeoLiteCity.dat.gz >GeoLiteCity.dat
curl http://freeapi.ipip.net/58.56.178.146
curl http://ip.chinaz.com/getip.aspx
curl ip.cn

i=100
while [[ $i -gt 0 ]]; do
     echo ""
     i=`expr $i - 1`
done

for (( i = 0; i < $array_length; i=i+2 )); do
    echo ${array[$i]}
done


IP验证、掩码计算
ipcalc -bmnp 192.168.1.241 255.255.255.0

touch a
echo > a
cat a
ll a
sed -i '/^$/d' a
cat -A a
ll a

# 测试磁盘IO性能,测试IO性能
hdparm -t --direct /dev/vda1

fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=test --bs=4k --iodepth=64 --size=4G --readwrite=randrw --rwmixread=75
fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=test --bs=4k --iodepth=64 --size=4G --readwrite=randread
fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=test --bs=4k --iodepth=64 --size=4G --readwrite=randwrite

# https://help.aliyun.com/document_detail/147897.html
fio -direct=1 -iodepth=128 -rw=randwrite -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/your_device -name=Rand_Write_Testing
fio -direct=1 -iodepth=32 -rw=randwrite -ioengine=libaio -bs=4k -numjobs=4 -time_based=1 -runtime=1000 -group_reporting -filename=/dev/your_device -name=test
fio -direct=1 -iodepth=32 -rw=randwrite -ioengine=libaio -bs=4k -numjobs=4 -time_based=1 -runtime=1000 -group_reporting -filename=/dev/your_device -name=test

fio -direct=1 -iodepth=128 -rw=randread -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/your_device -name=Rand_Read_Testing
fio -direct=1 -iodepth=64 -rw=write -ioengine=libaio -bs=1024k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/your_device -name=Write_PPS_Testing
fio -direct=1 -iodepth=64 -rw=read -ioengine=libaio -bs=1024k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/your_device -name=Read_PPS_Testing
fio -direct=1 -iodepth=1 -rw=randwrite -ioengine=libaio -bs=4k -size=1G -numjobs=1 -group_reporting -filename=/dev/your_device -name=Rand_Write_Latency_Testing
fio -direct=1 -iodepth=1 -rw=randread -ioengine=libaio -bs=4k -size=1G -numjobs=1 -group_reporting -filename=/dev/your_device -name=Rand_Read_Latency_Testing

# ssh-copy-id Line:41
which sshpass >/dev/null 2>&1 || yum -q -y install sshpass
[ $? -eq 0 ] || yum -qy install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
which sshpass >/dev/null 2>&1 || yum -q -y install sshpass

sshpass -p v20EyiSvN4VHE ssh -p 22 -oStrictHostKeyChecking=no root@192.168.1.244 "exec sh -c 'echo $(cat .ssh/id_rsa.pub) >> ~/.ssh/authorized_keys && (test -x /sbin/restorecon && /sbin/restorecon ~/.ssh ~/.ssh/authorized_keys >/dev/null 2>&1 || true)'"

sshpass -p v20EyiSvN4VHE cat .ssh/id_rsa.pub | ssh -p 22 -oStrictHostKeyChecking=no root@192.168.1.243 "exec sh -c 'cat >> ~/.ssh/authorized_keys && (test -x /sbin/restorecon && /sbin/restorecon ~/.ssh ~/.ssh/authorized_keys >/dev/null 2>&1 || true)'"


ssh IP -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no

# remove large files
find / -size +100M 2>/dev/null -exec ls -alsh '{}' \;
find / -size +1G 2>/dev/null -exec ls -alsh '{}' \;
find /data -size  +100M | xargs ls -alsh
find / -size +10M -exec ls -alsh '{}' \;

# grep find -r /etc
find $Cache -name "*.deb"  \( -mtime +$MaxAge -and -ctime +$MaxAge \) -and -not \( -mtime -$MinAge -or -ctime -$MinAge \) -print0 | xargs -r -0 rm -f
find /var/crash/. ! -name . -prune -type f \( \( -size 0 -a \! -name '*.upload*' -a \! -name '*.drkonqi*' \) -o -mtime +7 \) -exec rm -f -- '{}' \;

grep -e '-s' file
grep '\-s' file
# Matching Control
#      -e PATTERN, --regexp=PATTERN Use PATTERN as the pattern.  This can be used to specify multiple search patterns, or to protect a pattern beginning with a hyphen (-).  (-e is specified  by POSIX.)

tar zxf go1.5.3.linux-amd64.tar.gz
tar xjf iRedMail-0.9.7.tar.bz2

打印第一列和第二列
df -h | awk 'NF=2'

# awk 打印列数为x的列
awk '{if(NF==3)print}' urlfile

打印一个空行所在的行号,取出前面的部分
awk "NR<=`awk '$0~/^\s*$/{print NR}' kvt.sql | head -n1` {print}" kvt.sql

只打印磁盘使用率高于80%:
df -h  |awk '+$5>80'
df -h|awk -F "[ %]+" '$5+0>5 {print $1,$5}'

echo 'numYields:0  reslen:95 4295ms' | awk '{print $NF}'
4295ms

# awk 仅保留数字
echo 'numYields:0  reslen:95 4295ms' | awk '{print +$NF}'
4295

kill `netstat -nptl|awk '/8091/ {print +$7}'`


echo 'numYields:0  reslen:95 4295ms' | grep -oP '\d+(?=ms)'

awk '/a/{getline;print $1}' a, getline能越过第一行,处理剩余的行列

cat -A filename 能显示出每行尾部的换行符,如 ^M,原因是Windows换行符是\r\n,而Linux下是\n,Mac OS X中是\r,用编辑器将换行符替换就可以了。

rsync -avz -e ssh vivek@server.nixcraft.in:/home/vivek/ /backup
rsync --exclude '*.cpp' -avz -e ssh vivek@server.nixcraft.in:/home/vivek/ /backup
rsync --exclude '*.cpp' --exclude '*.log' -avz -e ssh vivek@server.nixcraft.in:/home/vivek/ /backup

rsync -avz '-e ssh -p 12322' root@192.168.100.85:/usr/local/zabbix /tmp

#awk将多个空格作为分隔符
awk -F '[ ]+' '/^[a-z]/ {print $3}' update.conf

# Locate (and optionally remove) large *.out files.
# Quote the glob: unquoted "*.out" is expanded by the shell in the current
# directory before find ever sees it, silently changing the search pattern.
find / -name '*.out' 2>/dev/null -exec ls -sh {} \;
find / -name '*.out' -size +100M 2>/dev/null -exec ls -sh {} \;
find / -name '*.out' -size +100M 2>/dev/null -exec rm -rf {} \;

# 获取Web HTTP header
curl --head http://

grep [^/]$ 去掉以/结尾的字符串

# awk file1 file2
awk 'NR==FNR {a[$1]=$2;next}$2 in a{$2=a[$2]}1' bb aa

echo '((Atha|AT3G45050 #0.1029 , (Crub|10018166m #0.7799 , Aral|48490n #0.3701 ) #998.0000 ) #172.0000' | awk -F '[,(]' '{for(i=1;i<=NF;i++){if(match($i,"([^|]+)[^#]+#([^ ]+)",a))print a[1],a[2]}}'

转换文本编码
iconv - Convert encoding of given files from one encoding to another
iconv --list
iconv -f ascii -t utf8 zhongwen中文你好zhongwen.txt -o /dev/stdout

按照inode删除文件
find /usr/bin/ -inum 698041 -delete
find -inum $(ls -i *curl* | awk '{print $1}') -exec rm -rf {} \;

测试端口是否开放并连接正常
nc -zv 58.67.199.171 29092
nmap -sT 58.67.199.171 -p29092
nmap -sS -P0 -sV -O 58.67.199.171 -p29092


echo "0.2 0.34 1.2345 " | awk '{i = 1; while ( i <= NF ) { printf("%0.4f ",$i); i++}}'

$ awk '{ i = 1; while ( i <= NF ) { print NF,$i; i++}}' test
$ awk '{for (i = 1; i<NF; i++) print NF,$i}' test 作用同上

除此之外,可以用awk实现“curl ip.cn?ip=114.114.114.114 2>/dev/null | awk '{gsub(":"," ");print $2" "$4" "$5}'”,结果会直接显示“114.114.114.114 江苏省南京市 信风网络”,这样就非常简单了。
php代码可以在命令行执行,不需要非得建一个网站,如php -f <file>。
curl -s ip.cn?ip=114.114.114.114|sed -r 's/[^ ]+://g'
 
Refer:
http://bbs.51cto.com/viewthread.php?tid=1175434&amp;pid=6018583&amp;page=1&amp;extra=#pid6018583

wget --no-check-certificate https://raw.github.com/robinparisi/ssh-manager/master/ssh-manager.sh

# Continuously (1s interval) summarize TCP connection states for port 30011.
# Fixed printf spec: flags must precede the field width, so "%-11s", not "%11-s".
while : ; do netstat -an | grep 30011 | awk '/^tcp/ {++S[$NF]} END {for (a in S) {printf "%-11s %s\n", a,S[a]}}' | grep -v State;sleep 1;done


ls | xargs rm -rf

#打印最后一行
sed  -n '$'p

# 在某个目录的文件里查找字符串
find . -type f| xargs grep 'xxoo'
grep -r "xxoo"

找出最新更改的第一个文件
ls -t | head -1

netstat -tnlp


linux查询可用的inode df -i

测试端口是否打开并启用
nc -zv 127.0.0.1 80
Connection to 127.0.0.1 80 port [tcp/http] succeeded!
        if nc -zv localhost $port >/dev/null 2>&1; then
            echo "ERROR: port $port is not available."
            exit 1
        fi

# uniq
Note: ’uniq’ does not detect repeated lines unless they are adjacent.  You may want to sort the input first, or use ‘sort -u’ without ‘uniq’.  Also, comparisons honor the rules specified by ‘LC_COLLATE’.


# 端口占用top
netstat -anot | awk '{print $5}' | awk -F ':' '{print $1}' | grep -v 192.168 | sort | uniq -c| sort -n -r | head -n 5
netstat -anot | awk '{print $5}' | awk -F ':' '{print $1}' | grep -v 192.168 | sort | uniq -c| sort -n -r | head

查看进程的线程信息NLWP     Number of Light-Weight Processes
ps -Lfp 11823

获取TCP连接数量
# Count TCP connections per state. Fixed printf spec: flags must precede the
# field width ("%-11s", not "%11-s").
ss state all | awk '{++S[$1]} END {for (a in S) {printf "%-11s %s\n", a,S[a]}}' | grep -v State
netstat -an | awk '/^tcp/ {++S[$NF]} END {for (a in S) {printf "%-11s %s\n", a,S[a]}}' | grep -v State
# Ubuntu: the state column of `ss state all` output is the 2nd field there.
ss state all | awk '{++S[$2]} END {for (a in S) {printf "%-11s %s\n", a,S[a]}}' | grep -v State

ps -a -uzabbix -o pid,ppid,stat,command

sudo umount -f -l /data/logs/mb

ps -ef | awk '/java/ && /tomcat-os/ && !/awk/ {print $2}'

#检测访问某网站的返回值

echo chris | sudo --stdin netstat -anop | grep 11099

curl --insecure --verbose https://www.baidu.com

curl -o /dev/null -m 10 --connect-timeout 10 -s -w %{http_code} http://www.vpser.net

# 扫描新添加的磁盘(猜想:sr0(光驱)为host0,sda(硬盘1)为host1,sdb(硬盘2)为host2)
lsblk
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan
lsblk
# 虚拟化环境内磁盘扩容(比如/dev/sdb从20GB扩容为40GB)
umount /dev/sdb1
echo 1 > /sys/block/sdb/device/rescan
fdisk -l /dev/sdb
fdisk /dev/sdb # d,n,p,1,,w
e2fsck -f /dev/sdb1
resize2fs /dev/sdb1
mount /dev/sdb1 /opt

[root@htvm zabbix-2.4.5]# lsblk
NAME                       MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                         11:0    1 1024M  0 rom
sda                          8:0    0   60G  0 disk
├─sda1                       8:1    0  500M  0 part /boot
└─sda2                       8:2    0 59.5G  0 part
  ├─vg_htvm-lv_root (dm-0) 253:0    0   50G  0 lvm  /
  ├─vg_htvm-lv_swap (dm-1) 253:1    0    2G  0 lvm  [SWAP]
  └─vg_htvm-lv_home (dm-2) 253:2    0  7.6G  0 lvm  /home
[root@htvm zabbix-2.4.5]#

查看当前系统的文件打开总数 cat /proc/sys/fs/file-max
lsof不适合查看一个连接数很高或者数量动态变化过快的进程或端口
查看某个进程使用的文件
lsof -p 16075
查看某个端口使用的文件
lsof -i:80
查看使用某个文件的用户和程序
fuser -v /bin/bash
lsof /bin/bash
查看某个端口的网络监听情况+进程
netstat -anop | grep \:53
系统监控类
# ss与netstat相近
ss -t -a | grep 29092 | wc -l
netstat -anop | grep 29092 | wc -l
ss sport eq :29092 | wc -l
ss dport eq :29092 | wc -l
ss
glances
htop
top
ps -A
ps -ef
ps aux

du -sh
df -h
du -x 
-x, --one-file-system
              skip directories on different file systems

ncdu -x
-x  Do not cross filesystem boundaries, i.e. only count files and directories on the same filesystem as the directory being scanned.

ps axw -o pid,ppid,user,%cpu,vsz,wchan,command | egrep '(nginx|PID)'



To see every process on the system using standard syntax:
ps -e
ps -ef
ps -eF
ps -ely

To see every process on the system using BSD syntax:
ps ax
ps axu

To print a process tree:
ps -ejH
ps axjf

To get info about threads:
ps -eLf
ps axms

To get security info:
ps -eo euser,ruser,suser,fuser,f,comm,label
ps axZ
ps -eM

To see every process running as root (real & effective ID) in user format:
ps -U root -u root u

To see every process with a user-defined format:
ps -eo pid,tid,class,rtprio,ni,pri,psr,pcpu,stat,wchan:14,comm
ps axo stat,euid,ruid,tty,tpgid,sess,pgrp,ppid,pid,pcpu,comm
ps -Ao pid,tt,user,fname,tmout,f,wchan

Print only the process IDs of syslogd:
ps -C syslogd -o pid=


Print only the name of PID 42:
ps -p 42 -o comm=

# 查看二进制可执行程序使用的库
ldd `which sshd` | grep libwrap # 确认sshd是否支持TCP Wrapper,输出类似:libwrap.so.0 => /lib/libwrap.so.0 (0x00bd1000)

URL=http://antivirus.neu.edu.cn/ssh/lists/neu_sshbl_hosts.deny.gz
FILE=hosts.deny
curl --connect-timeout 60 $URL 2> /dev/null | gzip -dc > $FILE 2> /dev/null
rpm -ql tcp_wrappers

上次登录用户与时间
last

ls -id /
# -d, --directory            list directory entries instead of contents, and do not dereference symbolic links

#去除第一列,或打印除了第一列之外的内容。
awk '{$1=""; print}'

查找并删除文件
find /tmp -name core -type f -print0 | xargs -0 /bin/rm -f
find /tmp -mtime +7 -exec ls {} \;

find . {-atime/-ctime/-mtime/-amin/-cmin/-mmin} [-/+]num

# delete all file expect for this script self
# find: warning: Unix filenames usually don't contain slashes (though pathnames do).  That means that '-name `./deploy.sh'' will probably evaluate to false all the time on this system.  You might find the '-wholename' test more useful, or perhaps '-samefile'.  Alternatively, if you are using GNU grep, you could use 'find ... -print0 | grep -FzZ `./deploy.sh''.
# echo $WORKDIR/
# find -L $WORKDIR -type f ! -name "$(basename $0)" -exec ls --color=auto -al {} \;
# find -L . -type f ! -name "deploy.sh" -exec ls --color=auto -al {} \;
# find -L . -type d -exec ls --color=auto -al {} \;
# find -L ./ -maxdepth 1 ! -name "deploy.sh" ! -wholename "./"
# ls | grep -v "fielname" |xargs rm -rf
find -L $WORKDIR -maxdepth 1 ! -name "$(basename $0)" ! -wholename "$WORKDIR"  -exec rm -rf {} \;

find /etc -name elasticsearch.repo -delete

查找并列出文件类型
find . -type f -exec file '{}' \;

查找大于1GB以上的文件,并列出
find / -size +1000M -exec ls -alh '{}' \;

#测试磁盘性能
time dd if=/dev/zero of=/tmp/testfile bs=4k  count=80000
time dd if=/dev/zero of=/tmp/testfile bs=64k  count=80000 conv=noerror,sync status=progress

find . -inum [inode数字] -exec rm -i {} \;

sed插入单引号
sed '$a $config->db->name = '"'"'gddebug_pms_pre55835'"';" /apps/zentao/config/my.php


#  多文件的查找的时候需要增加单引号或者将*转义以防止*被展开
find /root/ -name '*.sh' -type f -print -exec grep -n "bash" {} \;
find /root/ -name \*.sh -type f -print -exec grep -n "bash" {} \;

find /path/to/file -name '*.js' -type f -print | xargs grep "string"

find /usr/share/novnc/include -name '*.js' -type f -print | xargs grep "error"

find / -size +100M

nmap -APn 172.16.210.157

nmap -sn 172.16.172.0/24
nmap -sP 172.16.172.0/24
nmap -v -sn 192.168.0.0/16 10.0.0.0/8
nmap -Pn 172.16.172.0/24 带端口扫描


dstat
atop
lsof -iTCP
lsof -i@172.16.172.7
lsof -i@172.16.172.7:22
lsof -i -sTCP:ESTABLISHED
lsof -u zabbix | head

lspci
nmap -sT -O localhost

#校验验证IP是否合法
ipcalc -c  10.20.0.7

#egrep is the same as grep -E.
IP=$(ifconfig | grep inet | egrep -v "(inet6|127.0.0.1)" | awk -F ":" '{print $2}' | awk '{print $1}')
IP=$(ifconfig | grep inet | grep -Ev "(inet6|127.0.0.1)" | awk -F ":" '{print $2}' | awk '{print $1}')
hostname -i
facter ipaddress_eth0
#获取IP地址的系统标准方法:来自/etc/rc.d/rc.sysinit 346行
ip addr show to 0.0.0.0/0 scope global | awk '/[[:space:]]inet / { print gensub("/.*","","g",$2) }'

# Get all IP
ifconfig | grep inet | egrep -v "(inet6|127.0.0.1)" | cut -d ":" -f2 | cut -d " " -f1

# CentOS IP
DEVICE=$(route -n | awk '/^0.0.0.0/ && /UG/ {print $NF}')
IP=$(ifconfig $DEVICE | awk -F '[ :]+' '/inet/ && !/inet6/ {print $3}')
echo $IP
# Ubuntu IP
DEVICE=$(route -n | awk '/^0.0.0.0/ && /UG/ {print $NF}')
IP=$(ifconfig $DEVICE | awk -F '[ :]+' '/inet/ && !/inet6/ {print $4}')
echo $IP

#without awk or cut
IP1=$(ifconfig | grep inet | egrep -v "(inet6|127.0.0.1)")
IP2=${IP1#*addr:}
IP=${IP2%% Bcast*}
echo $IP

#简单加减乘除,四则运算
echo $(($a / 1))
echo $(($a + 1))
echo "( $(date -d '2021-11-13' "+%s") - $(date "+%s") ) / 3600 / 24" | bc

domainNum=0
((domainNum+=1))
domainGroup=$((domainNum/90))


#浮点数算术计算
yum install bc -y
echo "1.0 - 2.1 " | bc
echo "scale=2;4.0 / 3.0 " | bc
awk 'BEGIN{printf "%.2f\n",('$a'/'1')}'
echo "scale=2;$(cat /proc/sys/fs//aio-nr) / ($(cat /proc/sys/fs/aio-max-nr))"| bc
awk 'FNR==NR{a=$0;next}{print a/$0}' /proc/sys/fs/aio-nr /proc/sys/fs/aio-max-nr
cat /proc/sys/fs/aio-nr /proc/sys/fs/aio-max-nr | awk '{a[NR]=$0}END{print a[1]/a[2]}'

#字符串截取的另类方法
##字符串截取的另类写法#这种写法真是令人叫绝!一般人只会想到awk(例如echo "1 2" | awk 'NF=1')或cut(例如echo "1 2" | cut -d " " -f1)等字符串截取工具,反而看似如此简单的另类写法许多人都不会想到,绝,让人看后不禁觉得这种写法真是爽!!!尽管Bash Shell自带字符串截取功能(例如a="1 2";echo ${a% *}),尽管awk和cut工具随处可见,但此处却没有用,而是选择了又一种难得的写法, echo "1 2" | (read a b;echo $a)  。写法出自: udev服务启动脚本。
md5=$(/usr/bin/md5sum "$i"|(read a b; echo $a))

mysql -uu_dingguodong8 -p$(echo -n dingguodong8|md5sum|awk '{print $1}')

获取IP地址的系统标准方法:来自/etc/rc.d/rc.sysinit 346行
ip addr show to 0.0.0.0/0 scope global | awk '/[[:space:]]inet / { print gensub("/.*","","g",$2) }'
        ipaddr=
        if [ "$HOSTNAME" = "localhost" -o "$HOSTNAME" = "localhost.localdomain" ]; then
                ipaddr=$(ip addr show to 0.0.0.0/0 scope global | awk '/[[:space:]]inet / { print gensub("/.*","","g",$2) }')
                for ip in $ipaddr ; do
                        HOSTNAME=
                        eval $(ipcalc -h $ip 2>/dev/null)
                        [ -n "$HOSTNAME" ] && { hostname ${HOSTNAME} ; break; }
                done
        fi

查看Nginx编译参数和版本信息
[root@chris ~]# /usr/local/chrisdata/nginx/sbin/nginx -V
nginx version: nginx/1.8.0
built by gcc 4.4.7 20120313 (Red Hat 4.4.7-11) (GCC)
built with OpenSSL 1.0.2a 19 Mar 2015
TLS SNI support enabled
configure arguments: --prefix=/usr/local/chrisdata/nginx --with-http_ssl_module --with-openssl=/root/openssl-1.0.2a --with-pcre=/root/pcre-8.37 --with-zlib=/root/zlib-1.2.8
[root@chris ~]#


mount -t iso9660 CentOS-6.6-x86_64-minimal.iso /mnt -o loop,ro

# 获取证书信息
php -r 'print_r(openssl_x509_parse(file_get_contents("/etc/certs/oop.cc/fullchain.pem")));'

php -r 'print_r(openssl_x509_parse(file_get_contents("/home/data/sync/certs/live/chandao.com/fullchain.pem")));'

docker ps
docker run -i -t fuel/cobbler_6.1 bash

netstat -anop 2>/dev/null | grep 29093 | grep LISTEN | awk '{print $7}' | awk -F '/' '{print $1}'
netstat -anop 2>/dev/null | grep 29093 | grep LISTEN | awk -F '[ /]+' '{print $7}'
netstat -anop 2>/dev/null | awk '/29093/ && /LISTEN/{print a[split($7,a,"/")-1]}'

ps -ef | awk '/java/ && /tomcat-csServer/ && !/awk/ {print $2}' >/dev/null 2>&1


awk -F ":" '/查询耗时/ && $NF<1000{a++}/查询耗时/ && $NF>=1000&&$NF<5000{b++}/查询耗时/ && $NF>=5000{c++}END{print "小于1000: \t\t"a"\n""在1000和5000之间: \t"b"\n""大于5000: \t\t"c"\n"}' testfile2

awk -F ":" '$NF<1000{a++}$NF>=1000&&$NF<5000{b++}$NF>=5000{c++}END{print a,b,c}'

awk绝对值:a<0?0-a:a

#匹配手机号
echo 12312341234 | awk --re-interval '{print gensub(/([0-9]{11})/,"\\1",1)}'
grep -P "\A\d{11}\Z"
sed -n "/^[0-9]\{11\}$/p"
echo 18353271221 | awk --re-interval '{match($0,/(1[34578][0-9]{9})/,t);print t[1]}'
sed -n "/^1[34578][0-9]\{9\}$/p"



#!/bin/bash
# delete all spaces and comments of specialized file, using with $@ filename
# can not use '-eq' here where '==' located ('-eq' is for integers, '==' for strings).
# Fix: the original guard was '[[ "$1" == "" ]' — missing the closing ']]',
# which makes bash abort with a syntax error (the here-doc copy below had it right).
[[ "$1" == "" ]] && echo "delete all spaces and comments of specialized file, using with $@ filename" && exit 1
# Strip comment lines and blank lines; "$1" quoted so paths with spaces work.
grep -v '#' "$1" | grep -v '^$'

# Generate the helper script "delsc.sh" via a here-doc.
# NOTE: the delimiter "eof" is unquoted, so the shell expands $-variables while
# writing; the backslashes (\$1, \$@) keep them literal in the generated file.
cat >delsc.sh <<eof
#!/bin/bash
# delete all spaces and comments of specialized file, using with \$@ filename
[[ "\$1" == "" ]] && echo "delete all spaces and comments of specialized file, using with \$@ filename" && exit 1
grep -v \# \$1 | grep -v ^$
eof

# Install into PATH; \cp bypasses any 'cp -i' alias so it never prompts,
# then verify the install location and the generated content.
chmod +x ./delsc.sh
\cp delsc.sh /usr/local/bin/delsc
which delsc
cat /usr/local/bin/delsc


sed -e '/^#/d;/^$/d' $1

/usr/local/bin/delsc

显示环境变量
env
set,env和export这三个命令都可以用来显示shell变量,其区别?
set 用来显示本地变量
env 用来显示环境变量
export 用来显示和设置环境变量
set 显示当前shell的变量,包括当前用户的变量
env 显示当前用户的变量

export 显示当前导出成用户变量的shell变量
列出历史命令中最常用的10个
history | awk '{a[$2]++}END{for(i in a){print a[i] " " i}}' | sort -rn | head

# awk 分列、频次、排序
awk '{a[$1]++}END{for(i in a){print a[i] " " i}}' access.log | sort -nr |head -n100
awk '{a[$1]++}END{for(i=1;i<=asorti(a,b,"@val_num_desc");i++)print a[b[i]],b[i]}' access.log | head -n100
awk '{a[$1]++}END{asorti(a,b,"@val_num_desc");for(i=1;i<=100;i++)print a[b[i]],b[i]}' http_default.access.log


查看系统块(存储)设备
lsblk

systems administrator, tuner, benchmark tool
nmon

htop - interactive process viewer
htop

partx - tell the Linux kernel about the presence and numbering of on-disk partitions
partx

在Linux下打印一行字符,大家都使用echo,司空见惯,习以为常了。
如打印一行‘--------------------------------’,可以用下面语句:
$ echo "--------------------------------"
但是这里有一个问题,我们不知道有多少个‘-’,我们就是能看出来大约有多长。
其实我们还可以更精准打印,如下输出一行50个‘-’字符:
#不换行输出
$ printf '%.1s' '-'{1..50}
# 换行输出
$ printf '%.1s' '-'{1..50}; echo


CC=icc CFLAGS=-fPIC 怎么提取出 icc -fPIC?
echo "CC=icc CFLAGS=-fPIC" | awk -F'[ =]' '{print $2,$4}' #[ =]表示将空格或等号都可以作为分隔符
echo "CC=icc CFLAGS=-fPIC" | sed 's/[A-Z]*=//g'

df -h  按使用百分比排序
df -h | sed '1 d' | sort -k5 -nr

1.0.0这样的3位版本号取得以后,如何让最后一位版本号增加1?
echo name.1.0.2 | awk -F '.' '{print $(NF-2)"."$(NF-1)"."$NF+1 }'



# Wireshark 显示过滤器:按源主机过滤
ip.src_host == 128.199.176.14

# 数据库分割/截取字符串
select SUBSTRING_INDEX(pinyin,' ',1) from sys_user where deleted = '1' order by pinyin;

# 清理apt缓存数据
sudo rm -r /var/lib/apt/lists /var/cache/apt/archives



# 禁用账号
sudo usermod zentao -s /sbin/nologin -e 1 -L


echo '!a'\' '!a'"'"


find -type d -name sessions |xargs -i rm -rfv {}
find . -maxdepth 1 -type d -not -path '.' -exec zip -r "{}.zip" "{}" \;
find . -maxdepth 1 -type d -not -path '.' |xargs -i rm -rfv {}


如何获取磁盘的uuid?
blkid
lsblk -o NAME,UUID
ls -l /dev/disk/by-uuid/



linux /tmp 目录的权限应该如何设置?用ls命令如何查看目录的数字权限,如755、777?
在 Linux 中,/tmp 目录的权限通常设置为 1777。这个权限设置的含义如下:
* 1: 设置粘滞位(Sticky Bit),只有文件的所有者可以删除或重命名文件。
* 7: 所有用户(拥有者、组和其他用户)都有读、写和执行的权限。
sudo chmod 1777 /tmp
stat -c "%a" <directory_name>
stat -c "%a" /tmp


评论列表
匿名 2024-11-27 13:50:04 Email: ****@**** IP: 123.*.*.86 (山东/青岛) 回复
test::这是一条测试评论。
1/1
发表评论
博客分类