#!/usr/bin/env python3
"""
DARKSINT - Advanced OSINT Reconnaissance Platform
A comprehensive, overpowered OSINT tool with a modern GUI
100% FREE - No API Keys Required
"""

import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox, filedialog
import threading
import socket
import json
import requests
from datetime import datetime
import re
import subprocess
import whois
import dns.resolver
import dns.query
import dns.zone
import nmap
from urllib.parse import urlparse, quote
import hashlib
import base64
from concurrent.futures import ThreadPoolExecutor, as_completed
import os
import sys
import time
from bs4 import BeautifulSoup
import urllib3

# Recon requests are often made against hosts with self-signed or mismatched
# certificates, so unverified-HTTPS warnings are silenced globally.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class DarkSINT:
    def __init__(self, root):
        self.root = root
        self.root.title("⚡ DARKSINT - Advanced OSINT Platform")
        self.root.geometry("1600x1000")
        self.root.configure(bg='#0a0a0a')

        # Enhanced style configuration
        self.style = ttk.Style()
        self.style.theme_use('clam')
        self.configure_styles()

        # Main container
        self.main_frame = tk.Frame(root, bg='#0a0a0a')
        self.main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        # Header
        self.create_header()

        # Content area with tabs
        self.create_tabs()

        # Status bar
        self.create_status_bar()

        # Results storage
        self.results = {}

        # Large subdomain wordlist (400+ entries)
        self.massive_subdomain_list = self.load_massive_subdomain_list()
    def load_massive_subdomain_list(self):
        """Load a large subdomain wordlist for aggressive enumeration"""
        return [
            'www', 'mail', 'ftp', 'localhost', 'webmail', 'smtp', 'pop', 'ns1', 'webdisk',
            'ns2', 'cpanel', 'whm', 'autodiscover', 'autoconfig', 'm', 'imap', 'test',
            'ns', 'blog', 'pop3', 'dev', 'www2', 'admin', 'forum', 'news', 'vpn', 'ns3',
            'mail2', 'new', 'mysql', 'old', 'lists', 'support', 'mobile', 'mx', 'static',
            'docs', 'beta', 'shop', 'sql', 'secure', 'demo', 'cp', 'calendar', 'wiki',
            'web', 'media', 'email', 'images', 'img', 'www1', 'intranet', 'portal', 'video',
            'sip', 'dns2', 'api', 'cdn', 'stats', 'dns1', 'ns4', 'www3', 'dns', 'search',
            'staging', 'server', 'mx1', 'chat', 'wap', 'my', 'svn', 'mail1', 'sites',
            'proxy', 'ads', 'host', 'crm', 'cms', 'backup', 'mx2', 'lyncdiscover', 'info',
            'apps', 'download', 'remote', 'db', 'forums', 'store', 'relay', 'files',
            'newsletter', 'app', 'live', 'owa', 'en', 'start', 'sms', 'office', 'exchange',
            'ipv4', 'git', 'uploads', 'stage', 'dashboard', 'api2', 'production', 'sandbox',
            # Extended list - making it much more aggressive
            'develop', 'development', 'devtest', 'internal', 'corp', 'gateway', 'gw',
            'helpdesk', 'help', 'home', 'id', 'invoice', 'lab', 'labs', 'linux', 'local',
            'log', 'logger', 'logs', 'manage', 'manager', 'master', 'monitor', 'monitoring',
            'net', 'network', 'ns5', 'ns6', 'ops', 'opsec', 'partner', 'partners', 'pay',
            'payment', 'payments', 'preprod', 'preview', 'private', 'prod', 'public', 'qa',
            'reports', 'root', 'sales', 'sftp', 'share', 'shop2', 'shop3', 'sso', 'ssl',
            'staff', 'sys', 'system', 'temp', 'tmp', 'tools', 'uat', 'update', 'updates',
            'upload', 'us', 'v2', 'vc', 'vpn2', 'web1', 'web2', 'webconf', 'webdav', 'webservices',
            'webstats', 'ws', 'www4', 'www5', 'www6', 'www7', 'zimbra', 'zone',
            # Cloud & modern infrastructure
            'jenkins', 'jira', 'gitlab', 'github', 'bitbucket', 'grafana', 'kibana', 'prometheus',
            'docker', 'k8s', 'kubernetes', 'rancher', 'portainer', 'traefik', 'consul', 'vault',
            'redis', 'postgres', 'mongodb', 'elastic', 'elasticsearch', 'kafka', 'rabbit', 'rabbitmq',
            # Admin & management
            'admin2', 'administrator', 'admins', 'backend', 'console', 'control', 'cpanel2',
            'database', 'db2', 'dbadmin', 'direct', 'directadmin', 'dnsconsole', 'erp',
            'fileserver', 'fms', 'login', 'mgmt', 'nagios', 'panel', 'pma', 'phpmyadmin',
            'plesk', 'roundcube', 'sentry', 'smtp2', 'smtp3', 'sql2',
            # Services & applications
            'api-dev', 'api-prod', 'api-staging', 'api3', 'app2', 'app3', 'application',
            'auth', 'autodiscover2', 'autoconfig2', 'billing', 'cloud', 'config', 'confluence',
            'customer', 'data', 'devops', 'directory', 'doc', 'documentation', 'download2',
            'downloads', 'drupal', 'finance', 'graphql', 'hr', 'jenkins2', 'joomla', 'kb',
            'moodle', 'nextcloud', 'node', 'owncloud', 'redmine', 'rest', 'rocketchat',
            'service', 'services', 'slack', 'sonar', 'sonarqube', 'status', 'teams', 'track',
            'tracking', 'upload2', 'varnish', 'web3', 'webapi', 'webservice', 'wordpress', 'wp',
            # Geographic & language variants
            'us-east', 'us-west', 'eu', 'eu-west', 'asia', 'ap', 'apac', 'ca', 'uk', 'de',
            'fr', 'es', 'it', 'br', 'jp', 'au', 'in', 'cn', 'sg', 'hk',
            # Numbers and common patterns
            '1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
            'web01', 'web02', 'web03', 'srv1', 'srv2', 'srv3', 'server1', 'server2', 'host1', 'host2',
            'db01', 'db02', 'app01', 'app02', 'mail01', 'mail02', 'ns01', 'ns02',
            # Security & testing
            'test2', 'test3', 'testing', 'pentest', 'security', 'scan', 'scanner', 'nessus',
            'qualys', 'acunetix', 'burp', 'metasploit', 'kali', 'hackerlab', 'ctf',
            # Mobile & regional
            'android', 'ios', 'mobile2', 'm2', 'amp', 'go', 'i', 'wap2', 'mobi',
            # E-commerce & payment
            'shop4', 'cart', 'checkout', 'order', 'orders', 'product', 'products', 'catalog',
            'invoice2', 'invoices', 'transaction', 'transactions', 'merchant', 'stripe', 'paypal',
            # Communication
            'meet', 'meeting', 'webex', 'zoom', 'skype', 'conference', 'video2', 'call',
            'voip', 'pbx', 'asterisk', 'freepbx',
            # CMS & frameworks
            'magento', 'shopify', 'prestashop', 'woocommerce', 'opencart', 'django', 'flask',
            'rails', 'laravel', 'symfony', 'spring', 'express', 'nextjs', 'nuxt', 'gatsby',
            # Backup & archive
            'backup2', 'backups', 'archive', 'archives', 'old2', 'legacy', 'mirror', 'replica',
            'bak', 'dump', 'export', 'snapshot',
            # Special purpose
            'assets', 'resources', 'public2', 'content', 'cdn2', 'static2', 'media2', 'img2',
            'images2', 'photos', 'gallery', 'pic', 'pics', 'picture', 'pictures', 'thumb',
            'thumbs', 'thumbnails', 'avatar', 'avatars',
        ]

    def configure_styles(self):
        """Configure enhanced dark theme styles"""
        self.style.configure('TNotebook', background='#0a0a0a', borderwidth=0)
        self.style.configure('TNotebook.Tab', background='#1a1a1a', foreground='#00ff00',
                             padding=[25, 12], font=('JetBrains Mono', 10, 'bold'))
        self.style.map('TNotebook.Tab', background=[('selected', '#00ff00')],
                       foreground=[('selected', '#000000')])
        self.style.configure('TFrame', background='#0a0a0a')
        self.style.configure('TLabel', background='#0a0a0a', foreground='#00ff00',
                             font=('JetBrains Mono', 10))
        self.style.configure('Header.TLabel', font=('JetBrains Mono', 28, 'bold'),
                             foreground='#00ff00')
        self.style.configure('TButton', background='#00ff00', foreground='#000000',
                             borderwidth=0, font=('JetBrains Mono', 9, 'bold'))
        self.style.map('TButton', background=[('active', '#00cc00')])

    def create_header(self):
        """Create the header section"""
        header_frame = ttk.Frame(self.main_frame)
        header_frame.pack(fill=tk.X, pady=(0, 20))

        title = ttk.Label(header_frame, text="⚡ DARKSINT ⚡",
                          style='Header.TLabel')
        title.pack(side=tk.LEFT)

        subtitle = ttk.Label(header_frame, text="OVERPOWERED OSINT PLATFORM | 100% FREE | NO API KEYS",
                             font=('JetBrains Mono', 11, 'bold'))
        subtitle.pack(side=tk.LEFT, padx=(20, 0))

    def create_tabs(self):
        """Create tabbed interface for different OSINT modules"""
        self.notebook = ttk.Notebook(self.main_frame)
        self.notebook.pack(fill=tk.BOTH, expand=True)

        # Create different tabs
        self.create_domain_tab()
        self.create_email_tab()
        self.create_username_tab()
        self.create_ip_tab()
        self.create_phone_tab()
        self.create_crypto_tab()
        self.create_leaks_tab()
        self.create_advanced_tab()

    def create_domain_tab(self):
        """Enhanced Domain Intelligence Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='🌐 DOMAIN')

        input_frame = self.create_input_section(frame, "Domain/URL:")
        self.domain_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="WHOIS",
                   command=lambda: self.run_task(self.whois_lookup, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="DNS Deep Scan",
                   command=lambda: self.run_task(self.dns_deep_scan, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Subdomain Mega Scan",
                   command=lambda: self.run_task(self.subdomain_mega_scan, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="SSL/TLS",
                   command=lambda: self.run_task(self.ssl_info, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Tech Stack",
                   command=lambda: self.run_task(self.tech_stack_detection, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Certificate Transparency",
                   command=lambda: self.run_task(self.cert_transparency_mega, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="FULL RECON",
                   command=lambda: self.run_task(self.full_domain_recon, self.domain_entry.get())).pack(side=tk.LEFT, padx=3)

        self.domain_output = self.create_output_section(frame)

    def create_email_tab(self):
        """Enhanced Email Intelligence Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='📧 EMAIL')

        input_frame = self.create_input_section(frame, "Email/Domain:")
        self.email_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Validate",
                   command=lambda: self.run_task(self.email_validation_deep, self.email_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Breach Mega Search",
                   command=lambda: self.run_task(self.breach_mega_search, self.email_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Email Permutations",
                   command=lambda: self.run_task(self.email_permutations, self.email_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Domain Emails",
                   command=lambda: self.run_task(self.domain_email_harvest, self.email_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Disposable Check",
                   command=lambda: self.run_task(self.disposable_email_check, self.email_entry.get())).pack(side=tk.LEFT, padx=3)

        self.email_output = self.create_output_section(frame)

    def create_username_tab(self):
        """Enhanced Username Intelligence Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='👤 USERNAME')

        input_frame = self.create_input_section(frame, "Username:")
        self.username_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Social Media Hunt (50+)",
                   command=lambda: self.run_task(self.username_mega_hunt, self.username_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="GitHub Deep Dive",
                   command=lambda: self.run_task(self.github_deep_dive, self.username_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Gaming Profiles",
                   command=lambda: self.run_task(self.gaming_profile_search, self.username_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Developer Platforms",
                   command=lambda: self.run_task(self.developer_platform_search, self.username_entry.get())).pack(side=tk.LEFT, padx=3)

        self.username_output = self.create_output_section(frame)

    def create_ip_tab(self):
        """Enhanced IP Intelligence Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='🔍 IP/NETWORK')

        input_frame = self.create_input_section(frame, "IP/Network:")
        self.ip_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Geolocation",
                   command=lambda: self.run_task(self.ip_geolocation_multi, self.ip_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Reverse DNS",
                   command=lambda: self.run_task(self.reverse_dns, self.ip_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Aggressive Port Scan",
                   command=lambda: self.run_task(self.aggressive_port_scan, self.ip_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Reputation+Threat Intel",
                   command=lambda: self.run_task(self.ip_reputation_mega, self.ip_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Shodan/Censys Links",
                   command=lambda: self.run_task(self.device_search_engines, self.ip_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="ASN Lookup",
                   command=lambda: self.run_task(self.asn_lookup, self.ip_entry.get())).pack(side=tk.LEFT, padx=3)

        self.ip_output = self.create_output_section(frame)

    def create_phone_tab(self):
        """Enhanced Phone Intelligence Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='📱 PHONE')

        input_frame = self.create_input_section(frame, "Phone Number:")
        self.phone_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Deep Analysis",
                   command=lambda: self.run_task(self.phone_deep_analysis, self.phone_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Carrier Intel",
                   command=lambda: self.run_task(self.carrier_intelligence, self.phone_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Social Media Link",
                   command=lambda: self.run_task(self.phone_social_search, self.phone_entry.get())).pack(side=tk.LEFT, padx=3)

        self.phone_output = self.create_output_section(frame)

    def create_crypto_tab(self):
        """Cryptocurrency Intelligence Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='₿ CRYPTO')

        input_frame = self.create_input_section(frame, "Wallet/Address:")
        self.crypto_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Bitcoin Lookup",
                   command=lambda: self.run_task(self.bitcoin_lookup, self.crypto_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Ethereum Lookup",
                   command=lambda: self.run_task(self.ethereum_lookup, self.crypto_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Multi-Chain Search",
                   command=lambda: self.run_task(self.crypto_multi_chain, self.crypto_entry.get())).pack(side=tk.LEFT, padx=3)

        self.crypto_output = self.create_output_section(frame)

    def create_leaks_tab(self):
        """Data Leaks & Breaches Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='🔓 LEAKS')

        input_frame = self.create_input_section(frame, "Search Term:")
        self.leaks_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Pastebin Search",
                   command=lambda: self.run_task(self.pastebin_search, self.leaks_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="GitHub Dork",
                   command=lambda: self.run_task(self.github_dork_search, self.leaks_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Code Search",
                   command=lambda: self.run_task(self.code_leak_search, self.leaks_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Paste Sites",
                   command=lambda: self.run_task(self.paste_sites_search, self.leaks_entry.get())).pack(side=tk.LEFT, padx=3)

        self.leaks_output = self.create_output_section(frame)

    def create_advanced_tab(self):
        """Advanced Tools Tab"""
        frame = ttk.Frame(self.notebook)
        self.notebook.add(frame, text='⚡ ADVANCED')

        input_frame = self.create_input_section(frame, "Target:")
        self.advanced_entry = input_frame['entry']

        btn_frame = ttk.Frame(frame)
        btn_frame.pack(pady=10)

        ttk.Button(btn_frame, text="Google Dork Generator",
                   command=lambda: self.run_task(self.google_dork_mega, self.advanced_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Wayback Deep Scan",
                   command=lambda: self.run_task(self.wayback_deep_scan, self.advanced_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Web Archive Hunt",
                   command=lambda: self.run_task(self.web_archive_hunt, self.advanced_entry.get())).pack(side=tk.LEFT, padx=3)
        ttk.Button(btn_frame, text="Company Intel",
                   command=lambda: self.run_task(self.company_intelligence, self.advanced_entry.get())).pack(side=tk.LEFT, padx=3)

        # Export section
        export_frame = ttk.Frame(frame)
        export_frame.pack(pady=20)

        ttk.Button(export_frame, text="📊 Export JSON",
                   command=self.export_json).pack(side=tk.LEFT, padx=5)
        ttk.Button(export_frame, text="📄 Export TXT",
                   command=self.export_txt).pack(side=tk.LEFT, padx=5)
        ttk.Button(export_frame, text="📋 Export HTML Report",
                   command=self.export_html).pack(side=tk.LEFT, padx=5)
        ttk.Button(export_frame, text="🗑️ Clear All",
                   command=self.clear_all).pack(side=tk.LEFT, padx=5)

        self.advanced_output = self.create_output_section(frame)

    def create_input_section(self, parent, label_text):
        """Create a standard input section"""
        frame = ttk.Frame(parent)
        frame.pack(pady=20, padx=20, fill=tk.X)

        ttk.Label(frame, text=label_text, font=('JetBrains Mono', 11, 'bold')).pack(side=tk.LEFT, padx=5)
        entry = ttk.Entry(frame, width=70, font=('JetBrains Mono', 11))
        entry.pack(side=tk.LEFT, padx=5, fill=tk.X, expand=True)

        return {'frame': frame, 'entry': entry}

    def create_output_section(self, parent):
        """Create a standard output section"""
        output_frame = ttk.Frame(parent)
        output_frame.pack(pady=10, padx=20, fill=tk.BOTH, expand=True)

        output = scrolledtext.ScrolledText(output_frame, wrap=tk.WORD,
                                           font=('JetBrains Mono', 9),
                                           bg='#000000', fg='#00ff00',
                                           insertbackground='#00ff00',
                                           selectbackground='#003300')
        output.pack(fill=tk.BOTH, expand=True)

        return output

    def create_status_bar(self):
        """Create status bar at bottom"""
        self.status_frame = tk.Frame(self.main_frame, bg='#00ff00', height=35)
        self.status_frame.pack(fill=tk.X, side=tk.BOTTOM, pady=(10, 0))

        self.status_label = tk.Label(self.status_frame, text="⚡ READY TO HUNT",
                                     bg='#00ff00', fg='#000000',
                                     font=('JetBrains Mono', 10, 'bold'), anchor=tk.W)
        self.status_label.pack(side=tk.LEFT, padx=10)

    def update_status(self, message):
        """Update status bar message"""
        self.status_label.config(text=f"⚡ {message.upper()}")
        self.root.update_idletasks()

    def run_task(self, func, *args):
        """Run a task in a separate thread"""
        self.update_status(f"RUNNING {func.__name__}...")
        thread = threading.Thread(target=self.execute_task, args=(func, *args))
        thread.daemon = True
        thread.start()

    def execute_task(self, func, *args):
        """Execute a task and handle errors"""
        try:
            func(*args)
            self.update_status("TASK COMPLETED")
        except Exception as e:
            self.update_status(f"ERROR: {str(e)}")
            print(f"Error in {func.__name__}: {str(e)}")
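
    # NOTE (reviewer comment, not in the original): the worker threads started by
    # run_task() call update_status() and append_output() directly. Tkinter widgets
    # are not guaranteed to be thread-safe, so a more defensive variant would
    # marshal UI updates onto the main loop, e.g.:
    #     self.root.after(0, lambda: self.append_output(widget, text))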

    def append_output(self, output_widget, text):
        """Append text to output widget"""
        output_widget.insert(tk.END, text + "\n")
        output_widget.see(tk.END)
        self.root.update_idletasks()

    def clear_output(self, output_widget):
        """Clear output widget"""
        output_widget.delete(1.0, tk.END)

    # ==================== ENHANCED OSINT FUNCTIONS ====================

    def whois_lookup(self, domain):
        """Enhanced WHOIS lookup"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\nWHOIS DEEP LOOKUP: {domain}\n{'='*100}\n")

        try:
            domain = self.extract_domain(domain)
            w = whois.whois(domain)

            result = f"\n[+] Domain: {domain}\n"
            result += f"[+] Registrar: {w.registrar}\n"
            result += f"[+] Creation Date: {w.creation_date}\n"
            result += f"[+] Expiration Date: {w.expiration_date}\n"
            result += f"[+] Updated Date: {w.updated_date}\n"
            result += f"[+] Name Servers: {w.name_servers}\n"
            result += f"[+] Status: {w.status}\n"
            result += f"[+] Emails: {w.emails}\n"
            result += f"[+] Registrant: {w.name}\n"
            result += f"[+] Organization: {w.org}\n"
            result += f"[+] Country: {w.country}\n"
            result += f"[+] State: {w.state}\n"
            result += f"[+] City: {w.city}\n"
            result += f"[+] Address: {w.address}\n"
            result += f"[+] Postal Code: {w.zipcode}\n"

            self.append_output(self.domain_output, result)

            # Calculate domain age (creation_date may be a list for some TLDs)
            if w.creation_date:
                if isinstance(w.creation_date, list):
                    creation = w.creation_date[0]
                else:
                    creation = w.creation_date
                age = (datetime.now() - creation).days
                self.append_output(self.domain_output, f"\n[!] Domain Age: {age} days ({age//365} years)")

            self.results['whois'] = {'domain': domain, 'data': dict(w)}

        except Exception as e:
            self.append_output(self.domain_output, f"[-] Error: {str(e)}")

    def dns_deep_scan(self, domain):
        """Deep DNS enumeration with all record types"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\nDNS DEEP SCAN: {domain}\n{'='*100}\n")

        domain = self.extract_domain(domain)
        record_types = ['A', 'AAAA', 'MX', 'NS', 'TXT', 'SOA', 'CNAME', 'PTR', 'SRV', 'CAA', 'DNSKEY', 'DS', 'NAPTR', 'TLSA']

        results = {}
        for record_type in record_types:
            try:
                answers = dns.resolver.resolve(domain, record_type)
                results[record_type] = []
                self.append_output(self.domain_output, f"\n[+] {record_type} Records:")
                for rdata in answers:
                    self.append_output(self.domain_output, f"    {rdata}")
                    results[record_type].append(str(rdata))
            except Exception:
                pass

        # Try zone transfer (usually blocked but worth trying);
        # requires the dns.query and dns.zone imports at the top of the file
        self.append_output(self.domain_output, f"\n[*] Attempting zone transfer (AXFR)...")
        try:
            ns_records = dns.resolver.resolve(domain, 'NS')
            for ns in ns_records:
                try:
                    zone = dns.zone.from_xfr(dns.query.xfr(str(ns), domain))
                    self.append_output(self.domain_output, f"[!] ZONE TRANSFER SUCCESSFUL from {ns}!")
                    for name in zone.nodes.keys():
                        self.append_output(self.domain_output, f"    {name}")
                except Exception:
                    pass
        except Exception:
            self.append_output(self.domain_output, "[*] Zone transfer blocked (expected)")

        self.results['dns_deep'] = {'domain': domain, 'records': results}

    def subdomain_mega_scan(self, domain):
        """Massive subdomain enumeration with a 400+ entry wordlist"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\nSUBDOMAIN MEGA SCAN: {domain}\n{'='*100}\n")

        domain = self.extract_domain(domain)
        self.append_output(self.domain_output, f"\n[!] AGGRESSIVE MODE: Scanning {len(self.massive_subdomain_list)} subdomains...")
        self.append_output(self.domain_output, f"[!] This may take several minutes...\n")

        found_subdomains = []

        def check_subdomain(sub):
            subdomain = f"{sub}.{domain}"
            try:
                answers = dns.resolver.resolve(subdomain, 'A')
                ips = [str(rdata) for rdata in answers]
                return subdomain, ips
            except Exception:
                return None, None
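
        # NOTE (reviewer comment, not in the original): domains with a wildcard
        # DNS record will "resolve" every candidate label, flooding the scan with
        # false positives. A cheap pre-check is to resolve a random label first
        # and discard hits that return the same IPs, e.g.:
        #     import uuid
        #     dns.resolver.resolve(f"{uuid.uuid4().hex}.{domain}", 'A')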

        with ThreadPoolExecutor(max_workers=50) as executor:
            futures = {executor.submit(check_subdomain, sub): sub for sub in self.massive_subdomain_list}

            for future in as_completed(futures):
                subdomain, ips = future.result()
                if subdomain:
                    self.append_output(self.domain_output, f"[+] FOUND: {subdomain} -> {', '.join(ips)}")
                    found_subdomains.append({'subdomain': subdomain, 'ips': ips})

        self.append_output(self.domain_output, f"\n{'='*100}")
        self.append_output(self.domain_output, f"[!] TOTAL SUBDOMAINS FOUND: {len(found_subdomains)}")
        self.append_output(self.domain_output, f"{'='*100}")

        self.results['subdomains_mega'] = {'domain': domain, 'found': found_subdomains}

    def ssl_info(self, domain):
        """Enhanced SSL/TLS analysis"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\nSSL/TLS DEEP ANALYSIS: {domain}\n{'='*100}\n")

        domain = self.extract_domain(domain)

        try:
            import ssl

            context = ssl.create_default_context()
            with socket.create_connection((domain, 443), timeout=10) as sock:
                with context.wrap_socket(sock, server_hostname=domain) as ssock:
                    cert = ssock.getpeercert()
                    cipher = ssock.cipher()

                    self.append_output(self.domain_output, f"\n[+] SSL/TLS Version: {ssock.version()}")
                    self.append_output(self.domain_output, f"[+] Cipher Suite: {cipher[0]}")
                    self.append_output(self.domain_output, f"[+] Cipher Strength: {cipher[2]} bits")

                    self.append_output(self.domain_output, f"\n[+] Certificate Subject:")
                    for item in cert['subject']:
                        self.append_output(self.domain_output, f"    {item}")

                    self.append_output(self.domain_output, f"\n[+] Certificate Issuer:")
                    for item in cert['issuer']:
                        self.append_output(self.domain_output, f"    {item}")

                    self.append_output(self.domain_output, f"\n[+] Valid From: {cert['notBefore']}")
                    self.append_output(self.domain_output, f"[+] Valid Until: {cert['notAfter']}")

                    if 'subjectAltName' in cert:
                        self.append_output(self.domain_output, f"\n[+] Subject Alternative Names ({len(cert['subjectAltName'])}):")
                        for san_type, san_value in cert['subjectAltName']:
                            self.append_output(self.domain_output, f"    {san_value}")

                    # Check for vulnerabilities
                    self.append_output(self.domain_output, f"\n[!] Security Analysis:")
                    if 'TLSv1.3' in ssock.version() or 'TLSv1.2' in ssock.version():
                        self.append_output(self.domain_output, f"    [✓] Modern TLS version")
                    else:
                        self.append_output(self.domain_output, f"    [!] OUTDATED TLS version - security risk!")

                    if cipher[2] >= 128:
                        self.append_output(self.domain_output, f"    [✓] Strong cipher strength")
                    else:
                        self.append_output(self.domain_output, f"    [!] WEAK cipher strength!")

        except Exception as e:
            self.append_output(self.domain_output, f"[-] Error: {str(e)}")

    def tech_stack_detection(self, domain):
        """Detect web technologies and stack"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\nTECH STACK DETECTION: {domain}\n{'='*100}\n")

        domain = self.extract_domain(domain)
        url = f"https://{domain}" if not domain.startswith('http') else domain

        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            }
            response = requests.get(url, headers=headers, timeout=10, verify=False)

            self.append_output(self.domain_output, f"\n[+] HTTP Status: {response.status_code}")

            # Analyze headers
            self.append_output(self.domain_output, f"\n[+] Server Headers:")
            interesting_headers = ['Server', 'X-Powered-By', 'X-AspNet-Version', 'X-AspNetMvc-Version',
                                   'X-Framework', 'X-Generator', 'X-Drupal-Cache', 'X-Varnish']

            for header in interesting_headers:
                if header in response.headers:
                    self.append_output(self.domain_output, f"    {header}: {response.headers[header]}")

            # Analyze HTML
            html = response.text.lower()

            self.append_output(self.domain_output, f"\n[+] Technology Detection:")
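
            # NOTE (reviewer comment, not in the original): the checks below are
            # plain substring matches against the page source, so generic tokens
            # such as 'node' or 'react' can easily yield false positives.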
            techs = {
                'WordPress': ['wp-content', 'wp-includes'],
                'Drupal': ['drupal', 'sites/all/modules'],
                'Joomla': ['joomla', 'com_content'],
                'Django': ['csrfmiddlewaretoken'],
                'Laravel': ['laravel'],
                'React': ['react', '_react'],
                'Vue.js': ['vue', 'vuejs'],
                'Angular': ['ng-', 'angular'],
                'jQuery': ['jquery'],
                'Bootstrap': ['bootstrap'],
                'PHP': ['.php', 'phpsessid'],
                'ASP.NET': ['__viewstate', 'asp.net'],
                'Node.js': ['node'],
                'Nginx': ['nginx'],
                'Apache': ['apache'],
                'Cloudflare': ['cloudflare'],
                'Google Analytics': ['google-analytics', 'gtag'],
            }

            found_techs = []
            for tech, indicators in techs.items():
                if any(indicator in html for indicator in indicators):
                    self.append_output(self.domain_output, f"    [✓] {tech} detected")
                    found_techs.append(tech)

            # Check cookies
            if response.cookies:
                self.append_output(self.domain_output, f"\n[+] Cookies ({len(response.cookies)}):")
                for cookie in response.cookies:
                    self.append_output(self.domain_output, f"    {cookie.name}: {cookie.value[:50]}...")

            self.append_output(self.domain_output, f"\n[!] Total Technologies Detected: {len(found_techs)}")

        except Exception as e:
            self.append_output(self.domain_output, f"[-] Error: {str(e)}")

    def cert_transparency_mega(self, domain):
        """Enhanced certificate transparency with multiple sources"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\nCERTIFICATE TRANSPARENCY MEGA SCAN: {domain}\n{'='*100}\n")

        domain = self.extract_domain(domain)

        try:
            # crt.sh API
            self.append_output(self.domain_output, "\n[*] Querying crt.sh certificate database...\n")
            url = f"https://crt.sh/?q=%.{domain}&output=json"

            response = requests.get(url, timeout=20)
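            # NOTE (reviewer comment, not in the original): crt.sh rate-limits and
            # may return HTML or a 5xx under load; response.json() then raises and
            # control falls through to the except handler below.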
            if response.status_code == 200:
                certs = response.json()

                domains = set()
                for cert in certs:
                    name = cert.get('name_value', '')
                    domains.update(name.split('\n'))

                # Filter and categorize
                wildcards = [d for d in domains if d.startswith('*')]
                regular = [d for d in domains if not d.startswith('*')]

                self.append_output(self.domain_output, f"[+] Total Unique Domains/Subdomains Found: {len(domains)}\n")

                if wildcards:
                    self.append_output(self.domain_output, f"\n[+] Wildcard Certificates ({len(wildcards)}):")
                    for d in sorted(wildcards)[:20]:
                        self.append_output(self.domain_output, f"    {d}")

                self.append_output(self.domain_output, f"\n[+] Regular Certificates ({len(regular)}):")
                for d in sorted(regular)[:100]:
                    self.append_output(self.domain_output, f"    {d}")

                if len(regular) > 100:
                    self.append_output(self.domain_output, f"\n[*] ... and {len(regular)-100} more domains")

                self.results['cert_transparency'] = {'domain': domain, 'subdomains': list(domains)}

        except Exception as e:
            self.append_output(self.domain_output, f"[-] Error: {str(e)}")

    def full_domain_recon(self, domain):
        """COMPREHENSIVE domain reconnaissance"""
        self.clear_output(self.domain_output)
        self.append_output(self.domain_output, f"{'='*100}\n⚡ FULL DOMAIN RECONNAISSANCE ⚡\n{'='*100}\n")
        self.append_output(self.domain_output, f"\n[!] Running complete reconnaissance on: {domain}")
        self.append_output(self.domain_output, f"[!] This will take several minutes...\n")

        # Note: each step below clears the shared domain output widget, so only
        # the last step's output stays visible on screen.
        self.whois_lookup(domain)
        time.sleep(1)
        self.dns_deep_scan(domain)
        time.sleep(1)
        self.ssl_info(domain)
        time.sleep(1)
        self.tech_stack_detection(domain)
        time.sleep(1)
        self.cert_transparency_mega(domain)
        time.sleep(1)
        self.subdomain_mega_scan(domain)

        self.append_output(self.domain_output, f"\n{'='*100}")
        self.append_output(self.domain_output, f"[✓] FULL DOMAIN RECONNAISSANCE COMPLETED!")
        self.append_output(self.domain_output, f"{'='*100}")

    def email_validation_deep(self, email):
        """Deep email validation"""
        self.clear_output(self.email_output)
        self.append_output(self.email_output, f"{'='*100}\nEMAIL DEEP VALIDATION: {email}\n{'='*100}\n")

        # Format validation
        email_regex = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
        if not re.match(email_regex, email):
            self.append_output(self.email_output, "[-] INVALID EMAIL FORMAT")
            return

        self.append_output(self.email_output, "[✓] Email format: VALID")

        username, domain = email.split('@', 1)

        self.append_output(self.email_output, f"\n[+] Username: {username}")
        self.append_output(self.email_output, f"[+] Domain: {domain}")

        # Check MX records
        try:
            mx_records = list(dns.resolver.resolve(domain, 'MX'))
            self.append_output(self.email_output, f"\n[+] MX Records ({len(mx_records)}):")
            for mx in mx_records:
                self.append_output(self.email_output, f"    {mx.exchange} (Priority: {mx.preference})")
            self.append_output(self.email_output, "[✓] Domain has valid MX records")
        except Exception:
            self.append_output(self.email_output, "\n[!] NO MX RECORDS - Email likely invalid or catch-all")

        # Check A record
        try:
            a_records = dns.resolver.resolve(domain, 'A')
            self.append_output(self.email_output, f"\n[+] Domain A Records:")
            for a in a_records:
                self.append_output(self.email_output, f"    {a}")
        except Exception:
            self.append_output(self.email_output, "\n[-] No A records found")

    def breach_mega_search(self, email):
        """Enhanced breach checking from multiple sources"""
        self.clear_output(self.email_output)
        self.append_output(self.email_output, f"{'='*100}\nBREACH MEGA SEARCH: {email}\n{'='*100}\n")

        try:
            # Have I Been Pwned
            self.append_output(self.email_output, "\n[*] Checking Have I Been Pwned...\n")
            url = f"https://haveibeenpwned.com/api/v3/breachedaccount/{email}"
            headers = {'User-Agent': 'DARKSINT-OSINT-Tool'}
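
            # NOTE (reviewer comment, not in the original): since v3 the HIBP
            # breachedaccount endpoint requires a paid 'hibp-api-key' header;
            # without one the request normally returns 401 and only the raw status
            # code is reported below.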
            response = requests.get(url, headers=headers, timeout=10)

            if response.status_code == 200:
                breaches = response.json()
                self.append_output(self.email_output, f"[!] EMAIL FOUND IN {len(breaches)} BREACHES!\n")
                for breach in breaches:
                    self.append_output(self.email_output, f"\n[!] Breach: {breach['Name']}")
                    self.append_output(self.email_output, f"    Domain: {breach.get('Domain', 'N/A')}")
                    self.append_output(self.email_output, f"    Breach Date: {breach.get('BreachDate', 'N/A')}")
                    self.append_output(self.email_output, f"    Verified: {breach.get('IsVerified', 'N/A')}")
                    self.append_output(self.email_output, f"    Compromised Data: {', '.join(breach.get('DataClasses', []))}")
                    self.append_output(self.email_output, f"    Affected Accounts: {breach.get('PwnCount', 'N/A')}")
                self.results['breach_check'] = {'email': email, 'breaches': breaches}
            elif response.status_code == 404:
                self.append_output(self.email_output, "[✓] Email NOT found in HIBP database")
            else:
                self.append_output(self.email_output, f"[*] HIBP Status: {response.status_code}")

            # Additional breach search suggestions
            self.append_output(self.email_output, f"\n[+] Additional Breach Check Resources:")
            self.append_output(self.email_output, f"    • DeHashed: https://dehashed.com/search?query={email}")
            self.append_output(self.email_output, f"    • LeakCheck: https://leakcheck.io/")
            self.append_output(self.email_output, f"    • IntelX: https://intelx.io/")
            self.append_output(self.email_output, f"    • Snusbase: https://snusbase.com/")

        except Exception as e:
            self.append_output(self.email_output, f"[-] Error: {str(e)}")

    def email_permutations(self, name_or_domain):
        """Generate email permutations for a domain"""
        self.clear_output(self.email_output)
        self.append_output(self.email_output, f"{'='*100}\nEMAIL PERMUTATION GENERATOR: {name_or_domain}\n{'='*100}\n")

        if '@' in name_or_domain:
            name, domain = name_or_domain.split('@', 1)
        else:
            domain = name_or_domain
            name = "john.doe"  # Placeholder example name

        self.append_output(self.email_output, f"\n[*] Generating email permutations for domain: {domain}\n")
        self.append_output(self.email_output, f"[*] Using name: {name}\n")

        # Split name
        parts = name.replace('.', ' ').replace('-', ' ').split()
        if len(parts) >= 2:
            first = parts[0]
            last = parts[1]
        else:
            first = name
            last = "doe"

        patterns = [
            f"{first}.{last}@{domain}",
            f"{first}{last}@{domain}",
            f"{first}_{last}@{domain}",
            f"{first}-{last}@{domain}",
            f"{last}.{first}@{domain}",
            f"{last}{first}@{domain}",
            f"{first[0]}{last}@{domain}",
            f"{first}{last[0]}@{domain}",
            f"{first[0]}.{last}@{domain}",
            f"{last}.{first[0]}@{domain}",
            f"{first}@{domain}",
            f"{last}@{domain}",
            f"admin@{domain}",
            f"info@{domain}",
            f"contact@{domain}",
            f"support@{domain}",
            f"sales@{domain}",
            f"hr@{domain}",
            f"recruiting@{domain}",
            f"jobs@{domain}",
            f"careers@{domain}",
            f"security@{domain}",
            f"abuse@{domain}",
            f"noreply@{domain}",
            f"hello@{domain}",
            f"team@{domain}",
        ]

        self.append_output(self.email_output, f"[+] Common Email Patterns ({len(patterns)}):\n")
        for pattern in patterns:
            self.append_output(self.email_output, f"    {pattern}")

        self.append_output(self.email_output, f"\n[!] Use these emails for validation or enumeration")

    def domain_email_harvest(self, domain):
        """Harvest emails from a domain using multiple sources"""
        self.clear_output(self.email_output)
        self.append_output(self.email_output, f"{'='*100}\nDOMAIN EMAIL HARVESTING: {domain}\n{'='*100}\n")

        domain = self.extract_domain(domain)

        self.append_output(self.email_output, f"\n[*] Harvesting emails from: {domain}\n")

        # Hunter.io-style search suggestions
        self.append_output(self.email_output, f"[+] Free Email Discovery Tools:")
        self.append_output(self.email_output, f"    • Google Dork: site:{domain} intext:@{domain}")
        self.append_output(self.email_output, f"    • Hunter.io: https://hunter.io/search/{domain}")
        self.append_output(self.email_output, f"    • Snov.io: https://snov.io/email-finder")
        self.append_output(self.email_output, f"    • RocketReach: https://rocketreach.co/")
        self.append_output(self.email_output, f"    • Voila Norbert: https://www.voilanorbert.com/")

        # Try to scrape from website
        try:
            url = f"https://{domain}"
            headers = {'User-Agent': 'Mozilla/5.0'}
            response = requests.get(url, headers=headers, timeout=10, verify=False)

            # Find emails in HTML
            email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
            emails = re.findall(email_pattern, response.text)

            if emails:
                unique_emails = list(set(emails))
                self.append_output(self.email_output, f"\n[+] Emails Found on Website ({len(unique_emails)}):")
                for email in unique_emails:
                    if domain in email:
                        self.append_output(self.email_output, f"    {email}")
            else:
                self.append_output(self.email_output, f"\n[-] No emails found in HTML source")

        except Exception as e:
            self.append_output(self.email_output, f"[-] Could not scrape website: {str(e)}")

    def disposable_email_check(self, email):
        """Check if email is disposable/temporary"""
        self.clear_output(self.email_output)
        self.append_output(self.email_output, f"{'='*100}\nDISPOSABLE EMAIL CHECK: {email}\n{'='*100}\n")

        domain = email.split('@')[1] if '@' in email else email

        # List of known disposable domains
        disposable_domains = [
            'guerrillamail.com', 'mailinator.com', '10minutemail.com', 'tempmail.com',
            'throwaway.email', 'getnada.com', 'temp-mail.org', 'fakeinbox.com',
            'maildrop.cc', 'yopmail.com', 'trashmail.com', 'sharklasers.com'
        ]
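
        # NOTE (reviewer comment, not in the original): this static list covers
        # only a dozen providers; community-maintained lists such as the
        # disposable-email-domains project on GitHub track thousands of domains
        # and could be loaded here instead.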
        if domain.lower() in disposable_domains:
            self.append_output(self.email_output, f"\n[!] DISPOSABLE EMAIL DETECTED: {domain}")
            self.append_output(self.email_output, f"[!] This is a temporary/throwaway email service")
        else:
            self.append_output(self.email_output, f"\n[✓] Not a known disposable email service")

        self.append_output(self.email_output, f"\n[+] Verify with online tools:")
        self.append_output(self.email_output, f"    • https://www.emailhippo.com/")
        self.append_output(self.email_output, f"    • https://hunter.io/email-verifier")
        self.append_output(self.email_output, f"    • https://www.zerobounce.net/")

    def username_mega_hunt(self, username):
        """Massive username search across 60+ platforms"""
        self.clear_output(self.username_output)
        self.append_output(self.username_output, f"{'='*100}\nUSERNAME MEGA HUNT: {username}\n{'='*100}\n")

        # Expanded platform list (60+)
        platforms = {
            # Social Media (Mainstream)
            'GitHub': f'https://github.com/{username}',
            'Twitter': f'https://twitter.com/{username}',
            'Instagram': f'https://instagram.com/{username}',
            'Facebook': f'https://facebook.com/{username}',
            'LinkedIn': f'https://linkedin.com/in/{username}',
            'Reddit': f'https://reddit.com/user/{username}',
            'TikTok': f'https://tiktok.com/@{username}',
            'Pinterest': f'https://pinterest.com/{username}',
            'Snapchat': f'https://snapchat.com/add/{username}',
            'YouTube': f'https://youtube.com/@{username}',
            'Twitch': f'https://twitch.tv/{username}',

            # Professional
            'AngelList': f'https://angel.co/{username}',
            'Behance': f'https://behance.net/{username}',
            'Dribbble': f'https://dribbble.com/{username}',
            'Gravatar': f'https://gravatar.com/{username}',
            'About.me': f'https://about.me/{username}',

            # Development
            'GitLab': f'https://gitlab.com/{username}',
            'Bitbucket': f'https://bitbucket.org/{username}',
            'CodePen': f'https://codepen.io/{username}',
            'HackerRank': f'https://hackerrank.com/{username}',
            'Stack Overflow': f'https://stackoverflow.com/users/{username}',
            'Dev.to': f'https://dev.to/{username}',
            'Replit': f'https://replit.com/@{username}',
            'Kaggle': f'https://kaggle.com/{username}',

            # Communication
            'Telegram': f'https://t.me/{username}',
            'Discord': f'https://discord.com/users/{username}',
            'Slack': f'https://{username}.slack.com',
            'Skype': f'https://skype.com/{username}',

            # Media & Content
            'Medium': f'https://medium.com/@{username}',
            'Substack': f'https://{username}.substack.com',
            'Patreon': f'https://patreon.com/{username}',
            'Ko-fi': f'https://ko-fi.com/{username}',
            'Gumroad': f'https://gumroad.com/{username}',
            'Vimeo': f'https://vimeo.com/{username}',
            'SoundCloud': f'https://soundcloud.com/{username}',
            'Spotify': f'https://open.spotify.com/user/{username}',
            'Bandcamp': f'https://bandcamp.com/{username}',

            # Gaming
            'Steam': f'https://steamcommunity.com/id/{username}',
            'Xbox': f'https://xboxgamertag.com/search/{username}',
            'PlayStation': f'https://psnprofiles.com/{username}',
            'Roblox': f'https://roblox.com/users/profile?username={username}',
            'Minecraft': f'https://namemc.com/profile/{username}',
            'Epic Games': f'https://fortnitetracker.com/profile/all/{username}',

            # Forums & Communities
            '4chan': f'https://4chan.org/{username}',
            'HackerNews': f'https://news.ycombinator.com/user?id={username}',
            'Product Hunt': f'https://producthunt.com/@{username}',
            'Indie Hackers': f'https://indiehackers.com/{username}',

            # Other
            'Etsy': f'https://etsy.com/shop/{username}',
            'eBay': f'https://ebay.com/usr/{username}',
            'Goodreads': f'https://goodreads.com/{username}',
            'Flickr': f'https://flickr.com/photos/{username}',
            '500px': f'https://500px.com/{username}',
            'DeviantArt': f'https://deviantart.com/{username}',
            'ArtStation': f'https://artstation.com/{username}',
            'Quora': f'https://quora.com/profile/{username}',
            'Tumblr': f'https://{username}.tumblr.com',
            'WordPress': f'https://{username}.wordpress.com',
            'Blogger': f'https://{username}.blogspot.com',
            'LiveJournal': f'https://{username}.livejournal.com',
            'MySpace': f'https://myspace.com/{username}',
            'Last.fm': f'https://last.fm/user/{username}',
            'Mix': f'https://mix.com/{username}',
            'Flipboard': f'https://flipboard.com/@{username}',
        }

        self.append_output(self.username_output, f"\n[*] Checking {len(platforms)} platforms...\n")
        self.append_output(self.username_output, f"[!] This may take a few minutes...\n")

        found_profiles = []

        def check_platform(platform, url):
            try:
                response = requests.head(url, timeout=5, allow_redirects=True)
                if response.status_code == 200:
                    return platform, url, True
            except Exception:
                pass
            return platform, url, False
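
        # NOTE (reviewer comment, not in the original): a 200 response to a HEAD
        # request is a weak existence signal. Several platforms return 200 for
        # missing profiles (or 403/405 to HEAD for real ones), so expect both
        # false positives and false negatives here.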

        with ThreadPoolExecutor(max_workers=20) as executor:
            futures = {executor.submit(check_platform, p, u): p for p, u in platforms.items()}

            for future in as_completed(futures):
                platform, url, exists = future.result()
                if exists:
                    self.append_output(self.username_output, f"[+] FOUND on {platform}: {url}")
                    found_profiles.append({'platform': platform, 'url': url})

        self.append_output(self.username_output, f"\n{'='*100}")
        self.append_output(self.username_output, f"[!] TOTAL PROFILES FOUND: {len(found_profiles)}/{len(platforms)}")
        self.append_output(self.username_output, f"{'='*100}")

        self.results['username_mega'] = {'username': username, 'profiles': found_profiles}

    def github_deep_dive(self, username):
        """Deep GitHub intelligence gathering"""
        self.clear_output(self.username_output)
        self.append_output(self.username_output, f"{'='*100}\nGITHUB DEEP DIVE: {username}\n{'='*100}\n")

        try:
            # User info
            url = f"https://api.github.com/users/{username}"
            response = requests.get(url, timeout=10)
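
            # NOTE (reviewer comment, not in the original): unauthenticated GitHub
            # API calls are limited to 60 requests/hour per IP; once exhausted the
            # API returns 403, which the else branch below reports as "User not
            # found on GitHub".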
            if response.status_code == 200:
                data = response.json()
                self.append_output(self.username_output, f"\n[+] USER PROFILE:")
                self.append_output(self.username_output, f"    Username: {data.get('login')}")
                self.append_output(self.username_output, f"    Name: {data.get('name')}")
                self.append_output(self.username_output, f"    Company: {data.get('company')}")
                self.append_output(self.username_output, f"    Location: {data.get('location')}")
                self.append_output(self.username_output, f"    Email: {data.get('email')}")
                self.append_output(self.username_output, f"    Bio: {data.get('bio')}")
                self.append_output(self.username_output, f"    Blog: {data.get('blog')}")
                self.append_output(self.username_output, f"    Twitter: {data.get('twitter_username')}")
                self.append_output(self.username_output, f"    Public Repos: {data.get('public_repos')}")
                self.append_output(self.username_output, f"    Public Gists: {data.get('public_gists')}")
                self.append_output(self.username_output, f"    Followers: {data.get('followers')}")
                self.append_output(self.username_output, f"    Following: {data.get('following')}")
                self.append_output(self.username_output, f"    Created: {data.get('created_at')}")
                self.append_output(self.username_output, f"    Updated: {data.get('updated_at')}")
                self.append_output(self.username_output, f"    Profile: {data.get('html_url')}")

                # Get repositories
                repos_url = f"https://api.github.com/users/{username}/repos?per_page=100&sort=updated"
                repos_response = requests.get(repos_url, timeout=10)
                if repos_response.status_code == 200:
                    repos = repos_response.json()
                    self.append_output(self.username_output, f"\n[+] REPOSITORIES ({len(repos)}):")

                    # Sort by stars
                    repos_sorted = sorted(repos, key=lambda x: x.get('stargazers_count', 0), reverse=True)

                    for repo in repos_sorted[:20]:
                        self.append_output(self.username_output,
                                           f"    ⭐ {repo['name']} ({repo['stargazers_count']} stars, {repo['forks_count']} forks)")
                        # description/language can be JSON null, so guard before using
                        self.append_output(self.username_output,
                                           f"       {(repo.get('description') or 'No description')[:80]}")
                        self.append_output(self.username_output,
                                           f"       Language: {repo.get('language') or 'Unknown'} | Updated: {repo['updated_at']}")

                # Get events
                events_url = f"https://api.github.com/users/{username}/events/public"
                events_response = requests.get(events_url, timeout=10)
                if events_response.status_code == 200:
                    events = events_response.json()
                    self.append_output(self.username_output, f"\n[+] RECENT ACTIVITY ({len(events)} events):")
                    for event in events[:10]:
                        event_type = event.get('type', 'Unknown')
                        repo_name = event.get('repo', {}).get('name', 'Unknown')
                        created_at = event.get('created_at', 'Unknown')
                        self.append_output(self.username_output, f"    {event_type} on {repo_name} at {created_at}")

                # Get organizations
                orgs_url = f"https://api.github.com/users/{username}/orgs"
                orgs_response = requests.get(orgs_url, timeout=10)
                if orgs_response.status_code == 200:
                    orgs = orgs_response.json()
                    if orgs:
                        self.append_output(self.username_output, f"\n[+] ORGANIZATIONS ({len(orgs)}):")
                        for org in orgs:
                            self.append_output(self.username_output, f"    {org.get('login')} - {org.get('url')}")

            else:
                self.append_output(self.username_output, "[-] User not found on GitHub")

        except Exception as e:
            self.append_output(self.username_output, f"[-] Error: {str(e)}")

    def gaming_profile_search(self, username):
        """Search gaming platforms"""
        self.clear_output(self.username_output)
        self.append_output(self.username_output, f"{'='*100}\nGAMING PROFILE SEARCH: {username}\n{'='*100}\n")

        platforms = {
            'Steam': f'https://steamcommunity.com/id/{username}',
            'Steam (alternate)': f'https://steamcommunity.com/user/{username}',
            'Xbox Live': f'https://xboxgamertag.com/search/{username}',
            'PlayStation': f'https://psnprofiles.com/{username}',
            'Twitch': f'https://twitch.tv/{username}',
            'Discord': f'https://discord.com/users/{username}',
            'Roblox': f'https://roblox.com/users/profile?username={username}',
            'Minecraft': f'https://namemc.com/profile/{username}',
            'Epic Games': f'https://fortnitetracker.com/profile/all/{username}',
            'Battle.net': f'https://playoverwatch.com/en-us/search/account-by-name/{username}',
            'League of Legends': f'https://na.op.gg/summoner/userName={username}',
        }

        self.append_output(self.username_output, f"\n[+] Gaming Platform Links:")
        for platform, url in platforms.items():
            self.append_output(self.username_output, f"    {platform}: {url}")

    def developer_platform_search(self, username):
        """Search developer platforms"""
        self.clear_output(self.username_output)
        self.append_output(self.username_output, f"{'='*100}\nDEVELOPER PLATFORM SEARCH: {username}\n{'='*100}\n")

        platforms = {
            'GitHub': f'https://github.com/{username}',
            'GitLab': f'https://gitlab.com/{username}',
            'Bitbucket': f'https://bitbucket.org/{username}',
            'Stack Overflow': f'https://stackoverflow.com/users/{username}',
            'CodePen': f'https://codepen.io/{username}',
            'HackerRank': f'https://hackerrank.com/{username}',
            'LeetCode': f'https://leetcode.com/{username}',
            'Replit': f'https://replit.com/@{username}',
            'Dev.to': f'https://dev.to/{username}',
            'Kaggle': f'https://kaggle.com/{username}',
            'NPM': f'https://npmjs.com/~{username}',
            'PyPI': f'https://pypi.org/user/{username}',
            'Docker Hub': f'https://hub.docker.com/u/{username}',
        }

        self.append_output(self.username_output, f"\n[+] Developer Platform Links:")
        for platform, url in platforms.items():
            self.append_output(self.username_output, f"    {platform}: {url}")

    def ip_geolocation_multi(self, ip):
        """Enhanced IP geolocation with multiple sources"""
        self.clear_output(self.ip_output)
        self.append_output(self.ip_output, f"{'='*100}\nIP GEOLOCATION (MULTI-SOURCE): {ip}\n{'='*100}\n")

        try:
            # ip-api.com
            url = f"http://ip-api.com/json/{ip}"
            response = requests.get(url, timeout=10)
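
            # NOTE (reviewer comment, not in the original): ip-api.com's free
            # endpoint is HTTP-only (HTTPS requires the paid tier) and is
            # rate-limited to roughly 45 requests per minute.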
            if response.status_code == 200:
                data = response.json()
                if data['status'] == 'success':
                    self.append_output(self.ip_output, f"\n[+] IP GEOLOCATION DATA:")
                    self.append_output(self.ip_output, f"    IP: {data.get('query')}")
                    self.append_output(self.ip_output, f"    Country: {data.get('country')} ({data.get('countryCode')})")
                    self.append_output(self.ip_output, f"    Region: {data.get('regionName')} ({data.get('region')})")
                    self.append_output(self.ip_output, f"    City: {data.get('city')}")
                    self.append_output(self.ip_output, f"    ZIP Code: {data.get('zip')}")
                    self.append_output(self.ip_output, f"    Latitude: {data.get('lat')}")
                    self.append_output(self.ip_output, f"    Longitude: {data.get('lon')}")
                    self.append_output(self.ip_output, f"    Timezone: {data.get('timezone')}")
                    self.append_output(self.ip_output, f"    ISP: {data.get('isp')}")
                    self.append_output(self.ip_output, f"    Organization: {data.get('org')}")
                    self.append_output(self.ip_output, f"    AS Number: {data.get('as')}")

                    # Google Maps link
                    maps_link = f"https://www.google.com/maps?q={data.get('lat')},{data.get('lon')}"
                    self.append_output(self.ip_output, f"\n[+] Google Maps: {maps_link}")

        except Exception as e:
            self.append_output(self.ip_output, f"[-] Error: {str(e)}")

    def reverse_dns(self, ip):
        """Reverse DNS lookup"""
        self.clear_output(self.ip_output)
        self.append_output(self.ip_output, f"{'='*100}\nREVERSE DNS: {ip}\n{'='*100}\n")

        try:
            hostname = socket.gethostbyaddr(ip)
            self.append_output(self.ip_output, f"\n[+] IP: {ip}")
            self.append_output(self.ip_output, f"[+] Hostname: {hostname[0]}")
            self.append_output(self.ip_output, f"[+] Aliases: {', '.join(hostname[1]) if hostname[1] else 'None'}")
        except Exception as e:
            self.append_output(self.ip_output, f"[-] No PTR record found: {str(e)}")

    def aggressive_port_scan(self, ip):
        """Aggressive port scanning"""
        self.clear_output(self.ip_output)
        self.append_output(self.ip_output, f"{'='*100}\nAGGRESSIVE PORT SCAN: {ip}\n{'='*100}\n")
        self.append_output(self.ip_output, "\n[!] WARNING: Aggressive scanning in progress...")
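
        # NOTE (reviewer comment, not in the original): python-nmap shells out to
        # the nmap binary, which must be installed and on PATH; PortScanner()
        # raises PortScannerError if it is missing. Only scan hosts you are
        # authorized to test.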
        try:
            nm = nmap.PortScanner()
            self.append_output(self.ip_output, "\n[*] Scanning top 1000 ports with service detection...\n")

            # Aggressive scan with service/version detection
            nm.scan(ip, arguments='-sV -T4 --top-ports 1000')

            if ip in nm.all_hosts():
                self.append_output(self.ip_output, f"[+] Host: {ip}")
                self.append_output(self.ip_output, f"[+] State: {nm[ip].state()}")
                self.append_output(self.ip_output, f"[+] Hostnames: {nm[ip].hostnames()}")

                for proto in nm[ip].all_protocols():
                    self.append_output(self.ip_output, f"\n[+] Protocol: {proto.upper()}")
                    ports = nm[ip][proto].keys()

                    open_ports = []
                    for port in sorted(ports):
                        port_info = nm[ip][proto][port]
                        state = port_info['state']
                        if state == 'open':
                            service = port_info.get('name', 'unknown')
                            version = port_info.get('version', '')
                            product = port_info.get('product', '')

                            port_desc = f"Port {port}: {state} | {service}"
                            if product:
                                port_desc += f" | {product}"
                            if version:
                                port_desc += f" {version}"

                            self.append_output(self.ip_output, f"    {port_desc}")
                            open_ports.append(port)

                    self.append_output(self.ip_output, f"\n[!] Total Open Ports: {len(open_ports)}")

            else:
                self.append_output(self.ip_output, "[-] Host down or blocking scans")

        except Exception as e:
            self.append_output(self.ip_output, f"[-] Error: {str(e)}")
    def ip_reputation_mega(self, ip):
        """Mega IP reputation check with multiple sources"""
        self.clear_output(self.ip_output)
        self.append_output(self.ip_output, f"{'='*100}\nIP REPUTATION MEGA CHECK: {ip}\n{'='*100}\n")

        try:
            import ipaddress
            ip_obj = ipaddress.ip_address(ip)

            self.append_output(self.ip_output, f"\n[+] IP CLASSIFICATION:")
            self.append_output(self.ip_output, f"  Type: IPv{ip_obj.version}")
            self.append_output(self.ip_output, f"  Private: {ip_obj.is_private}")
            self.append_output(self.ip_output, f"  Loopback: {ip_obj.is_loopback}")
            self.append_output(self.ip_output, f"  Reserved: {ip_obj.is_reserved}")
            self.append_output(self.ip_output, f"  Multicast: {ip_obj.is_multicast}")
            self.append_output(self.ip_output, f"  Global: {ip_obj.is_global}")

            # Threat intelligence sources
            self.append_output(self.ip_output, f"\n[+] FREE REPUTATION CHECK SERVICES:")
            self.append_output(self.ip_output, f"  • AbuseIPDB: https://www.abuseipdb.com/check/{ip}")
            self.append_output(self.ip_output, f"  • VirusTotal: https://www.virustotal.com/gui/ip-address/{ip}")
            self.append_output(self.ip_output, f"  • Shodan: https://www.shodan.io/host/{ip}")
            self.append_output(self.ip_output, f"  • GreyNoise: https://viz.greynoise.io/ip/{ip}")
            self.append_output(self.ip_output, f"  • IPVoid: https://www.ipvoid.com/ip-blacklist-check/")
            self.append_output(self.ip_output, f"  • Talos: https://talosintelligence.com/reputation_center/lookup?search={ip}")
            self.append_output(self.ip_output, f"  • AlienVault OTX: https://otx.alienvault.com/indicator/ip/{ip}")
            self.append_output(self.ip_output, f"  • IPinfo: https://ipinfo.io/{ip}")
            self.append_output(self.ip_output, f"  • URLhaus: https://urlhaus.abuse.ch/browse.php?search={ip}")

            # Get additional context from ip-api
            try:
                url = f"http://ip-api.com/json/{ip}"
                response = requests.get(url, timeout=5)
                if response.status_code == 200:
                    data = response.json()
                    if data['status'] == 'success':
                        self.append_output(self.ip_output, f"\n[+] CONTEXT:")
                        self.append_output(self.ip_output, f"  ISP: {data.get('isp')}")
                        self.append_output(self.ip_output, f"  Org: {data.get('org')}")
                        self.append_output(self.ip_output, f"  AS: {data.get('as')}")
                        self.append_output(self.ip_output, f"  Location: {data.get('city')}, {data.get('country')}")

                        # Detection heuristics based on ISP/org keywords
                        isp = data.get('isp', '').lower()
                        org = data.get('org', '').lower()

                        if any(x in isp or x in org for x in ['mobile', 'cellular', '4g', '5g']):
                            self.append_output(self.ip_output, f"\n[!] DETECTION: Mobile/Cellular Network")
                        if any(x in isp or x in org for x in ['hosting', 'cloud', 'datacenter', 'server']):
                            self.append_output(self.ip_output, f"\n[!] DETECTION: Hosting/Cloud Provider")
                        if any(x in isp or x in org for x in ['vpn', 'proxy', 'tor']):
                            self.append_output(self.ip_output, f"\n[!] DETECTION: VPN/Proxy Service")
            except Exception:
                # Context lookup is best-effort; ignore network/JSON failures
                pass

        except Exception as e:
            self.append_output(self.ip_output, f"[-] Error: {str(e)}")

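    # --- Hedged sketch: DNS blocklist (DNSBL) probe ------------------------
    # The reputation links above are manual. One automated, key-free signal
    # is a DNSBL query: reverse the IPv4 octets, append a blocklist zone,
    # and resolve it; any answer means the IP is listed. Spamhaus ZEN is a
    # real public zone, but which zones are worth querying is a judgment
    # call, so treat this as an illustrative sketch for IPv4 only.
    def dnsbl_check(self, ip, zone='zen.spamhaus.org'):
        """Return True if the IPv4 address is listed in the given DNSBL zone."""
        reversed_ip = '.'.join(reversed(ip.split('.')))
        try:
            socket.gethostbyname(f"{reversed_ip}.{zone}")
            return True   # any A record means the IP is listed
        except socket.gaierror:
            return False  # NXDOMAIN: not listed (or the query was blocked)
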
    def device_search_engines(self, target):
        """Internet-wide device search engines"""
        self.clear_output(self.ip_output)
        self.append_output(self.ip_output, f"{'='*100}\nDEVICE SEARCH ENGINES: {target}\n{'='*100}\n")

        # URL-encode the target so queries containing spaces or ':' stay valid
        encoded = quote(target)

        self.append_output(self.ip_output, f"\n[+] FREE INTERNET-WIDE SEARCH ENGINES:\n")

        self.append_output(self.ip_output, f"[*] Shodan:")
        self.append_output(self.ip_output, f"  URL: https://www.shodan.io/search?query={encoded}")
        self.append_output(self.ip_output, f"  Note: Create free account for basic searches\n")

        self.append_output(self.ip_output, f"[*] Censys:")
        self.append_output(self.ip_output, f"  URL: https://search.censys.io/search?q={encoded}")
        self.append_output(self.ip_output, f"  Note: Free tier - 250 searches/month\n")

        self.append_output(self.ip_output, f"[*] FOFA:")
        self.append_output(self.ip_output, f"  URL: https://en.fofa.info/result?qbase64={base64.b64encode(target.encode()).decode()}\n")

        self.append_output(self.ip_output, f"[*] ZoomEye:")
        self.append_output(self.ip_output, f"  URL: https://www.zoomeye.org/searchResult?q={encoded}\n")

        self.append_output(self.ip_output, f"[*] GreyNoise:")
        self.append_output(self.ip_output, f"  URL: https://viz.greynoise.io/query/?gnql={encoded}\n")

        self.append_output(self.ip_output, f"[*] BinaryEdge:")
        self.append_output(self.ip_output, f"  URL: https://app.binaryedge.io/services/query\n")

        self.append_output(self.ip_output, f"[*] PublicWWW:")
        self.append_output(self.ip_output, f"  URL: https://publicwww.com/websites/{encoded}/\n")

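    # --- Hedged sketch: Shodan InternetDB (no API key) ----------------------
    # Besides the browser links above, Shodan exposes a free, keyless JSON
    # endpoint, InternetDB, returning cached open ports, hostnames and CVE
    # tags for an IP. The response fields used here ('ports', 'hostnames',
    # 'vulns') match the public docs at the time of writing; verify against
    # the current API before relying on them.
    def internetdb_lookup(self, ip):
        """Fetch cached scan data for an IP from Shodan's free InternetDB."""
        try:
            response = requests.get(f"https://internetdb.shodan.io/{ip}", timeout=10)
            if response.status_code == 200:
                data = response.json()
                return {
                    'ports': data.get('ports', []),
                    'hostnames': data.get('hostnames', []),
                    'vulns': data.get('vulns', []),
                }
        except requests.RequestException:
            pass
        return None
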
    def asn_lookup(self, ip):
        """ASN and network information lookup"""
        self.clear_output(self.ip_output)
        self.append_output(self.ip_output, f"{'='*100}\nASN LOOKUP: {ip}\n{'='*100}\n")

        try:
            # Using ip-api for ASN data
            url = f"http://ip-api.com/json/{ip}?fields=status,message,country,countryCode,region,regionName,city,zip,lat,lon,timezone,isp,org,as,asname,reverse,mobile,proxy,hosting,query"
            response = requests.get(url, timeout=10)

            if response.status_code == 200:
                data = response.json()
                if data['status'] == 'success':
                    self.append_output(self.ip_output, f"\n[+] ASN INFORMATION:")
                    self.append_output(self.ip_output, f"  AS Number: {data.get('as', 'N/A')}")
                    self.append_output(self.ip_output, f"  AS Name: {data.get('asname', 'N/A')}")
                    self.append_output(self.ip_output, f"  Organization: {data.get('org', 'N/A')}")
                    self.append_output(self.ip_output, f"  ISP: {data.get('isp', 'N/A')}")

                    self.append_output(self.ip_output, f"\n[+] NETWORK FLAGS:")
                    self.append_output(self.ip_output, f"  Mobile Network: {data.get('mobile', False)}")
                    self.append_output(self.ip_output, f"  Proxy/VPN: {data.get('proxy', False)}")
                    self.append_output(self.ip_output, f"  Hosting/Datacenter: {data.get('hosting', False)}")

                    self.append_output(self.ip_output, f"\n[+] Additional ASN Lookup Tools:")
                    self.append_output(self.ip_output, f"  • BGPView: https://bgpview.io/ip/{ip}")
                    self.append_output(self.ip_output, f"  • Hurricane Electric: https://bgp.he.net/ip/{ip}")
                    # ip-api returns 'as' like "AS15169 Google LLC"; keep only the number
                    as_number = data.get('as', '').split(' ')[0].replace('AS', '')
                    if as_number:
                        self.append_output(self.ip_output, f"  • IPinfo ASN: https://ipinfo.io/AS{as_number}")

        except Exception as e:
            self.append_output(self.ip_output, f"[-] Error: {str(e)}")

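    # --- Hedged sketch: ASN lookup over DNS (Team Cymru) --------------------
    # ip-api.com rate-limits free callers; Team Cymru publishes the same
    # IP-to-ASN mapping over plain DNS TXT records, which dnspython (already
    # imported) can query without any HTTP. The pipe-separated answer format
    # ("ASN | prefix | CC | registry | date") follows Cymru's documentation,
    # but this parser is a minimal IPv4-only sketch and may need hardening.
    def asn_lookup_dns(self, ip):
        """Resolve ASN details for an IPv4 address via origin.asn.cymru.com."""
        reversed_ip = '.'.join(reversed(ip.split('.')))
        try:
            answer = dns.resolver.resolve(f"{reversed_ip}.origin.asn.cymru.com", 'TXT')
            txt = answer[0].to_text().strip('"')
            asn, prefix, country, registry, allocated = [p.strip() for p in txt.split('|')]
            return {'asn': asn, 'prefix': prefix, 'country': country,
                    'registry': registry, 'allocated': allocated}
        except Exception:
            return None
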
    def phone_deep_analysis(self, phone):
        """Deep phone number analysis"""
        self.clear_output(self.phone_output)
        self.append_output(self.phone_output, f"{'='*100}\nPHONE DEEP ANALYSIS: {phone}\n{'='*100}\n")

        clean_phone = re.sub(r'\D', '', phone)

        self.append_output(self.phone_output, f"\n[+] PHONE NUMBER BREAKDOWN:")
        self.append_output(self.phone_output, f"  Original: {phone}")
        self.append_output(self.phone_output, f"  Cleaned: {clean_phone}")
        self.append_output(self.phone_output, f"  Length: {len(clean_phone)} digits")

        # NOTE: the component split below assumes NANP (US/Canada) numbering;
        # other countries' numbers will still be split, but the labels won't apply.
        if len(clean_phone) >= 10:
            country_code = clean_phone[:len(clean_phone)-10] if len(clean_phone) > 10 else "1"
            area_code = clean_phone[-10:-7]
            prefix = clean_phone[-7:-4]
            line = clean_phone[-4:]

            self.append_output(self.phone_output, f"\n[+] COMPONENTS:")
            self.append_output(self.phone_output, f"  Country Code: +{country_code}")
            self.append_output(self.phone_output, f"  Area Code: {area_code}")
            self.append_output(self.phone_output, f"  Prefix: {prefix}")
            self.append_output(self.phone_output, f"  Line Number: {line}")

            self.append_output(self.phone_output, f"\n[+] FORMAT VARIATIONS:")
            self.append_output(self.phone_output, f"  International: +{country_code} {area_code} {prefix}-{line}")
            self.append_output(self.phone_output, f"  National: ({area_code}) {prefix}-{line}")
            self.append_output(self.phone_output, f"  E.164: +{clean_phone}")
            self.append_output(self.phone_output, f"  RFC3966: tel:+{clean_phone}")

            # Area code database (sample of major US metros)
            area_codes = {
                '212': ('New York, NY', 'EST'),
                '310': ('Los Angeles, CA', 'PST'),
                '312': ('Chicago, IL', 'CST'),
                '415': ('San Francisco, CA', 'PST'),
                '202': ('Washington, DC', 'EST'),
                '305': ('Miami, FL', 'EST'),
                '702': ('Las Vegas, NV', 'PST'),
                '206': ('Seattle, WA', 'PST'),
                '617': ('Boston, MA', 'EST'),
                '404': ('Atlanta, GA', 'EST'),
            }

            if area_code in area_codes:
                location, timezone = area_codes[area_code]
                self.append_output(self.phone_output, f"\n[+] LOCATION INTEL:")
                self.append_output(self.phone_output, f"  Location: {location}")
                self.append_output(self.phone_output, f"  Timezone: {timezone}")

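    # --- Hedged sketch: library-backed parsing with `phonenumbers` ----------
    # The breakdown above hard-codes the North American numbering plan and a
    # ten-entry area-code table. If the optional third-party `phonenumbers`
    # package is installed (pip install phonenumbers), its parser covers
    # international formats and ships region/carrier metadata. This sketch
    # assumes that package and degrades gracefully when it is missing.
    def phone_parse_with_library(self, phone, default_region='US'):
        """Parse a number with the phonenumbers package, if available."""
        try:
            import phonenumbers
            from phonenumbers import carrier, geocoder
        except ImportError:
            return None  # optional dependency not installed
        try:
            parsed = phonenumbers.parse(phone, default_region)
            return {
                'valid': phonenumbers.is_valid_number(parsed),
                'e164': phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164),
                'region': geocoder.description_for_number(parsed, 'en'),
                'carrier': carrier.name_for_number(parsed, 'en'),
            }
        except phonenumbers.NumberParseException:
            return None
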
    def carrier_intelligence(self, phone):
        """Carrier and phone type intelligence"""
        self.clear_output(self.phone_output)
        self.append_output(self.phone_output, f"{'='*100}\nCARRIER INTELLIGENCE: {phone}\n{'='*100}\n")

        self.append_output(self.phone_output, f"\n[+] FREE CARRIER LOOKUP SERVICES:")
        self.append_output(self.phone_output, f"  • FreeCarrierLookup: https://freecarrierlookup.com/")
        self.append_output(self.phone_output, f"  • FoneFinder: https://fonefinder.net/")
        self.append_output(self.phone_output, f"  • Carrier Lookup: https://carrier-lookup.com/")
        self.append_output(self.phone_output, f"  • PhoneInfoga: https://github.com/sundowndev/phoneinfoga")

        self.append_output(self.phone_output, f"\n[+] PHONE TYPE DETECTION:")
        self.append_output(self.phone_output, f"  • Check WhatsApp registration (indicates mobile)")
        self.append_output(self.phone_output, f"  • Check Telegram registration (indicates mobile)")
        self.append_output(self.phone_output, f"  • Check Viber registration")
        self.append_output(self.phone_output, f"  • Call and listen to voicemail greeting")

    def phone_social_search(self, phone):
        """Search for phone number on social media"""
        self.clear_output(self.phone_output)
        self.append_output(self.phone_output, f"{'='*100}\nPHONE SOCIAL MEDIA SEARCH: {phone}\n{'='*100}\n")

        digits = re.sub(r'\D', '', phone)

        self.append_output(self.phone_output, f"\n[+] SOCIAL MEDIA SEARCH METHODS:")
        self.append_output(self.phone_output, f"\n  Facebook:")
        self.append_output(self.phone_output, f"  • Search: https://facebook.com/search/top/?q={quote(phone)}")
        self.append_output(self.phone_output, f"  • Try password reset to see if registered")

        self.append_output(self.phone_output, f"\n  WhatsApp:")
        self.append_output(self.phone_output, f"  • Check if number is registered on WhatsApp")
        self.append_output(self.phone_output, f"  • View profile picture if available")

        self.append_output(self.phone_output, f"\n  Telegram:")
        # t.me/<name> expects a username; phone deep links use t.me/+<digits>
        self.append_output(self.phone_output, f"  • Check: https://t.me/+{digits}")

        self.append_output(self.phone_output, f"\n  TrueCaller:")
        self.append_output(self.phone_output, f"  • https://www.truecaller.com/search/{phone}")

        self.append_output(self.phone_output, f"\n  Google Search:")
        # Build the dashed variant from digits only, so '+1 (555)...' input works
        if len(digits) >= 10:
            d = digits[-10:]
            self.append_output(self.phone_output, f"  • \"{phone}\" OR \"{d[:3]}-{d[3:6]}-{d[6:]}\"")
        else:
            self.append_output(self.phone_output, f"  • \"{phone}\"")

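    # --- Hedged sketch: messenger deep links ---------------------------------
    # wa.me and t.me are the public deep-link hosts for WhatsApp and Telegram;
    # building them from the digits-only number gives clickable pivots for the
    # manual checks listed above. Whether a profile actually exists still has
    # to be verified in the respective app, so this only generates the URLs.
    def messenger_links(self, phone):
        """Build WhatsApp/Telegram deep links from a phone number."""
        digits = re.sub(r'\D', '', phone)
        return {
            'whatsapp': f"https://wa.me/{digits}",
            'telegram': f"https://t.me/+{digits}",
        }
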
    def bitcoin_lookup(self, address):
        """Bitcoin address lookup"""
        self.clear_output(self.crypto_output)
        self.append_output(self.crypto_output, f"{'='*100}\nBITCOIN ADDRESS LOOKUP: {address}\n{'='*100}\n")

        self.append_output(self.crypto_output, f"\n[+] FREE BITCOIN BLOCKCHAIN EXPLORERS:")
        self.append_output(self.crypto_output, f"  • Blockchain.com: https://blockchain.com/btc/address/{address}")
        self.append_output(self.crypto_output, f"  • Blockchair: https://blockchair.com/bitcoin/address/{address}")
        self.append_output(self.crypto_output, f"  • BTC.com: https://btc.com/{address}")
        self.append_output(self.crypto_output, f"  • Blockcypher: https://live.blockcypher.com/btc/address/{address}")

        self.append_output(self.crypto_output, f"\n[+] ANALYSIS TOOLS:")
        self.append_output(self.crypto_output, f"  • WalletExplorer: https://walletexplorer.com/")
        self.append_output(self.crypto_output, f"  • OXT: https://oxt.me/")

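    # --- Hedged sketch: live balance via Blockstream's Esplora API -----------
    # The explorer links above are manual. Blockstream runs a free, keyless
    # REST API; the field names below (chain_stats.funded_txo_sum and
    # spent_txo_sum, in satoshis) follow the public Esplora documentation,
    # but verify against the current API before depending on them.
    def bitcoin_balance(self, address):
        """Return confirmed BTC balance for an address, or None on failure."""
        try:
            response = requests.get(f"https://blockstream.info/api/address/{address}", timeout=10)
            if response.status_code == 200:
                stats = response.json()['chain_stats']
                satoshis = stats['funded_txo_sum'] - stats['spent_txo_sum']
                return satoshis / 1e8  # convert satoshis to BTC
        except (requests.RequestException, KeyError, ValueError):
            pass
        return None
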
    def ethereum_lookup(self, address):
        """Ethereum address lookup"""
        self.clear_output(self.crypto_output)
        self.append_output(self.crypto_output, f"{'='*100}\nETHEREUM ADDRESS LOOKUP: {address}\n{'='*100}\n")

        self.append_output(self.crypto_output, f"\n[+] FREE ETHEREUM BLOCKCHAIN EXPLORERS:")
        self.append_output(self.crypto_output, f"  • Etherscan: https://etherscan.io/address/{address}")
        self.append_output(self.crypto_output, f"  • Blockchair: https://blockchair.com/ethereum/address/{address}")
        self.append_output(self.crypto_output, f"  • Etherchain: https://etherchain.org/account/{address}")

        self.append_output(self.crypto_output, f"\n[+] TOKEN TRACKERS:")
        self.append_output(self.crypto_output, f"  • View ERC-20 tokens")
        self.append_output(self.crypto_output, f"  • View ERC-721 NFTs")
        self.append_output(self.crypto_output, f"  • Transaction history")

    def crypto_multi_chain(self, address):
        """Multi-blockchain search"""
        self.clear_output(self.crypto_output)
        self.append_output(self.crypto_output, f"{'='*100}\nMULTI-CHAIN CRYPTO SEARCH: {address}\n{'='*100}\n")

        chains = {
            'Bitcoin': f'https://blockchair.com/bitcoin/address/{address}',
            'Ethereum': f'https://etherscan.io/address/{address}',
            'Litecoin': f'https://blockchair.com/litecoin/address/{address}',
            'Bitcoin Cash': f'https://blockchair.com/bitcoin-cash/address/{address}',
            'Dogecoin': f'https://blockchair.com/dogecoin/address/{address}',
            'Ripple': f'https://xrpscan.com/account/{address}',
            'Cardano': f'https://cardanoscan.io/address/{address}',
            'Polkadot': f'https://polkascan.io/polkadot/account/{address}',
            'Solana': f'https://explorer.solana.com/address/{address}',
            'Polygon': f'https://polygonscan.com/address/{address}',
            'BSC': f'https://bscscan.com/address/{address}',
        }

        self.append_output(self.crypto_output, f"\n[+] BLOCKCHAIN EXPLORERS (Try all chains):\n")
        for chain, url in chains.items():
            self.append_output(self.crypto_output, f"  {chain}: {url}")

        self.append_output(self.crypto_output, f"\n[+] MULTI-CHAIN TOOLS:")
        self.append_output(self.crypto_output, f"  • Blockchair (Multi-chain): https://blockchair.com/search?q={address}")
        self.append_output(self.crypto_output, f"  • DeBank (DeFi): https://debank.com/profile/{address}")
        self.append_output(self.crypto_output, f"  • Zapper (Portfolio): https://zapper.fi/account/{address}")

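    # --- Hedged sketch: guessing the chain from the address format -----------
    # Trying every explorer is wasteful when the address format already narrows
    # the chain. These regexes encode well-known conventions (Base58 '1'/'3'
    # and Bech32 'bc1' for Bitcoin, 0x plus 40 hex chars for EVM chains, etc.);
    # they are heuristics, not validators, since several chains share prefixes.
    def guess_chains(self, address):
        """Return a list of chains whose address format matches."""
        patterns = {
            'Bitcoin': r'^[13][1-9A-HJ-NP-Za-km-z]{25,34}$|^bc1[02-9ac-hj-np-z]{11,87}$',
            'Ethereum/Polygon/BSC': r'^0x[0-9a-fA-F]{40}$',
            'Litecoin': r'^[LM][1-9A-HJ-NP-Za-km-z]{26,33}$|^ltc1[02-9ac-hj-np-z]{11,87}$',
            'Dogecoin': r'^D[1-9A-HJ-NP-Za-km-z]{25,34}$',
            'Ripple': r'^r[1-9A-HJ-NP-Za-km-z]{24,34}$',
        }
        return [chain for chain, pattern in patterns.items() if re.match(pattern, address)]
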
    def pastebin_search(self, term):
        """Search Pastebin and similar sites"""
        self.clear_output(self.leaks_output)
        self.append_output(self.leaks_output, f"{'='*100}\nPASTEBIN SEARCH: {term}\n{'='*100}\n")

        self.append_output(self.leaks_output, f"\n[+] PASTE SITE SEARCH ENGINES:")
        self.append_output(self.leaks_output, f"  • Pastebin: https://pastebin.com/search?q={term}")
        self.append_output(self.leaks_output, f"  • Google Dork: site:pastebin.com \"{term}\"")
        self.append_output(self.leaks_output, f"  • Pastebins Monitor: https://psbdmp.ws/")

        self.append_output(self.leaks_output, f"\n[+] OTHER PASTE SITES:")
        sites = ['pastebin.com', 'ghostbin.com', 'dpaste.com', 'privatebin.net',
                 'hastebin.com', 'paste.ee', 'justpaste.it', 'rentry.co']

        for site in sites:
            self.append_output(self.leaks_output, f"  • Google: site:{site} \"{term}\"")

    def github_dork_search(self, term):
        """GitHub dork search for sensitive data"""
        self.clear_output(self.leaks_output)
        self.append_output(self.leaks_output, f"{'='*100}\nGITHUB DORK SEARCH: {term}\n{'='*100}\n")

        # filename:/extension: are GitHub code-search qualifiers
        dorks = [
            f'{term} password',
            f'{term} api_key',
            f'{term} apikey',
            f'{term} access_token',
            f'{term} secret',
            f'{term} private_key',
            f'{term} credentials',
            f'{term} config',
            f'{term} .env',
            f'{term} db_password',
            f'{term} aws_secret',
            f'filename:.env {term}',
            f'filename:config.php {term}',
            f'filename:database.yml {term}',
            f'extension:pem {term}',
            f'extension:key {term}',
        ]

        self.append_output(self.leaks_output, f"\n[+] GITHUB SEARCH QUERIES ({len(dorks)}):\n")
        for dork in dorks:
            search_url = f"https://github.com/search?q={quote(dork)}&type=code"
            self.append_output(self.leaks_output, f"  {dork}")
            self.append_output(self.leaks_output, f"  URL: {search_url}\n")

    def code_leak_search(self, term):
        """Search for code leaks"""
        self.clear_output(self.leaks_output)
        self.append_output(self.leaks_output, f"{'='*100}\nCODE LEAK SEARCH: {term}\n{'='*100}\n")

        encoded = quote(term)  # keep multi-word terms valid in URLs
        self.append_output(self.leaks_output, f"\n[+] CODE SEARCH ENGINES:")
        self.append_output(self.leaks_output, f"  • GitHub: https://github.com/search?q={encoded}&type=code")
        self.append_output(self.leaks_output, f"  • GitLab: https://gitlab.com/search?search={encoded}")
        self.append_output(self.leaks_output, f"  • Bitbucket: https://bitbucket.org/search?q={encoded}")
        self.append_output(self.leaks_output, f"  • SearchCode: https://searchcode.com/?q={encoded}")
        self.append_output(self.leaks_output, f"  • PublicWWW: https://publicwww.com/websites/{encoded}/")
        self.append_output(self.leaks_output, f"  • SourceGraph: https://sourcegraph.com/search?q={encoded}")

    def paste_sites_search(self, term):
        """Search across multiple paste sites"""
        self.clear_output(self.leaks_output)
        self.append_output(self.leaks_output, f"{'='*100}\nMULTI-PASTE SITE SEARCH: {term}\n{'='*100}\n")

        paste_sites = [
            ('Pastebin', 'pastebin.com'),
            ('GitHub Gist', 'gist.github.com'),
            ('Ghostbin', 'ghostbin.com'),
            ('Dpaste', 'dpaste.com'),
            ('PrivateBin', 'privatebin.net'),
            ('Hastebin', 'hastebin.com'),
            ('Paste.ee', 'paste.ee'),
            ('JustPaste.it', 'justpaste.it'),
            ('Rentry', 'rentry.co'),
            ('Paste.org', 'paste.org'),
            ('Codepad', 'codepad.org'),
            ('Ideone', 'ideone.com'),
            ('PasteLert', 'andrewmohawk.com/pasteLert'),
        ]

        self.append_output(self.leaks_output, f"\n[+] GOOGLE DORKS FOR PASTE SITES:\n")
        for name, site in paste_sites:
            self.append_output(self.leaks_output, f"  {name}: site:{site} \"{term}\"")

        self.append_output(self.leaks_output, f"\n[+] PASTE MONITORING TOOLS:")
        self.append_output(self.leaks_output, f"  • PasteBin Alerts: https://psbdmp.ws/")
        self.append_output(self.leaks_output, f"  • Dumpmon: https://twitter.com/dumpmon")

    def google_dork_mega(self, target):
        """Generate comprehensive Google dorks"""
        self.clear_output(self.advanced_output)
        self.append_output(self.advanced_output, f"{'='*100}\nGOOGLE DORK MEGA GENERATOR: {target}\n{'='*100}\n")

        # Comprehensive dork categories
        dorks = {
            'Basic Enumeration': [
                f'site:{target}',
                f'site:*.{target}',
                f'inurl:{target}',
                f'intitle:{target}',
                f'intext:{target}',
            ],
            'File Types': [
                f'site:{target} filetype:pdf',
                f'site:{target} filetype:doc',
                f'site:{target} filetype:docx',
                f'site:{target} filetype:xls',
                f'site:{target} filetype:xlsx',
                f'site:{target} filetype:ppt',
                f'site:{target} filetype:pptx',
                f'site:{target} filetype:txt',
                f'site:{target} filetype:csv',
                f'site:{target} filetype:xml',
                f'site:{target} filetype:sql',
                f'site:{target} filetype:log',
                f'site:{target} filetype:bak',
                f'site:{target} filetype:conf',
                f'site:{target} filetype:config',
                f'site:{target} filetype:env',
            ],
            'Login & Admin': [
                f'site:{target} inurl:admin',
                f'site:{target} inurl:login',
                f'site:{target} inurl:signin',
                f'site:{target} inurl:auth',
                f'site:{target} inurl:dashboard',
                f'site:{target} inurl:cpanel',
                f'site:{target} intitle:"admin panel"',
                f'site:{target} intitle:"login"',
            ],
            'Sensitive Info': [
                f'site:{target} intext:password',
                f'site:{target} intext:username',
                f'site:{target} intext:api_key',
                f'site:{target} intext:secret',
                f'site:{target} intext:token',
                f'site:{target} "confidential"',
                f'site:{target} "internal only"',
            ],
            'Directory Listings': [
                f'site:{target} intitle:"index of"',
                f'site:{target} intitle:"index of /" "parent directory"',
                f'site:{target} intitle:"directory listing for"',
            ],
            'Configuration Files': [
                f'site:{target} ext:sql',
                f'site:{target} ext:log',
                f'site:{target} ext:env',
                f'site:{target} ext:ini',
                f'site:{target} ext:conf',
                # Google has no 'filename:' operator (that is GitHub syntax);
                # inurl: is the closest Google equivalent for these files.
                f'site:{target} inurl:.htaccess',
                f'site:{target} inurl:wp-config.php',
                f'site:{target} inurl:web.config',
            ],
            'Database Files': [
                f'site:{target} ext:sql "INSERT INTO"',
                f'site:{target} ext:sql "CREATE TABLE"',
                f'site:{target} ext:mdb',
                f'site:{target} ext:db',
            ],
            'Email Addresses': [
                f'site:{target} intext:@{target}',
                f'site:{target} "email" OR "e-mail"',
            ],
        }

        for category, dork_list in dorks.items():
            self.append_output(self.advanced_output, f"\n[+] {category.upper()}:")
            for dork in dork_list:
                self.append_output(self.advanced_output, f"  {dork}")

        self.append_output(self.advanced_output, f"\n[!] Total Dorks Generated: {sum(len(v) for v in dorks.values())}")

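    # --- Hedged sketch: turning dorks into clickable search URLs -------------
    # The dorks above are printed raw; URL-encoding them with quote() (already
    # imported) makes them directly clickable. DuckDuckGo is included as an
    # alternative front end since Google throttles automated dorking; the
    # choice of engines here is an assumption of this example.
    def dorks_to_urls(self, dork_list):
        """Map each dork string to Google and DuckDuckGo search URLs."""
        return {
            dork: {
                'google': f"https://www.google.com/search?q={quote(dork)}",
                'duckduckgo': f"https://duckduckgo.com/?q={quote(dork)}",
            }
            for dork in dork_list
        }
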
    def wayback_deep_scan(self, url):
        """Deep Wayback Machine scanning"""
        self.clear_output(self.advanced_output)
        self.append_output(self.advanced_output, f"{'='*100}\nWAYBACK DEEP SCAN: {url}\n{'='*100}\n")

        try:
            api_url = f"http://archive.org/wayback/available?url={url}"
            response = requests.get(api_url, timeout=10)

            if response.status_code == 200:
                data = response.json()
                if 'archived_snapshots' in data and data['archived_snapshots']:
                    closest = data['archived_snapshots'].get('closest')
                    if closest:
                        self.append_output(self.advanced_output, f"\n[+] LATEST SNAPSHOT:")
                        self.append_output(self.advanced_output, f"  URL: {closest['url']}")
                        self.append_output(self.advanced_output, f"  Timestamp: {closest['timestamp']}")
                        self.append_output(self.advanced_output, f"  Status: {closest['status']}")

            # CDX API for full history
            cdx_url = f"http://web.archive.org/cdx/search/cdx?url={url}&output=json&limit=1000"
            cdx_response = requests.get(cdx_url, timeout=15)

            if cdx_response.status_code == 200:
                cdx_data = cdx_response.json()
                if len(cdx_data) > 1:  # First row is headers
                    self.append_output(self.advanced_output, f"\n[+] TOTAL SNAPSHOTS: {len(cdx_data)-1}")

                    # Show first and last snapshots (columns: urlkey, timestamp, original, ...)
                    if len(cdx_data) > 2:
                        first = cdx_data[1]
                        last = cdx_data[-1]
                        self.append_output(self.advanced_output, f"\n[+] First Snapshot: {first[1]} ({first[2]})")
                        self.append_output(self.advanced_output, f"[+] Last Snapshot: {last[1]} ({last[2]})")

            self.append_output(self.advanced_output, f"\n[+] VIEW ALL SNAPSHOTS:")
            self.append_output(self.advanced_output, f"  https://web.archive.org/web/*/{url}")

        except Exception as e:
            self.append_output(self.advanced_output, f"[-] Error: {str(e)}")

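    # --- Hedged sketch: enumerating archived URLs via the CDX API ------------
    # The scan above only counts snapshots of one URL. The same CDX endpoint
    # can enumerate every distinct URL the archive holds under a domain:
    # 'matchType=domain' widens the match, 'fl=original' keeps just the URL
    # column, and 'collapse=urlkey' deduplicates. These parameters appear in
    # the public CDX documentation; the 500-row cap is this example's own
    # limit to keep responses small.
    def wayback_url_enum(self, domain, limit=500):
        """List unique archived URLs for a domain from the Wayback CDX API."""
        cdx = ("http://web.archive.org/cdx/search/cdx"
               f"?url={domain}&matchType=domain&fl=original"
               f"&collapse=urlkey&output=json&limit={limit}")
        try:
            rows = requests.get(cdx, timeout=20).json()
            return [row[0] for row in rows[1:]]  # skip the header row
        except (requests.RequestException, ValueError):
            return []
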
    def web_archive_hunt(self, domain):
        """Hunt through web archives"""
        self.clear_output(self.advanced_output)
        self.append_output(self.advanced_output, f"{'='*100}\nWEB ARCHIVE HUNT: {domain}\n{'='*100}\n")

        self.append_output(self.advanced_output, f"\n[+] WEB ARCHIVE SOURCES:\n")
        self.append_output(self.advanced_output, f"  • Wayback Machine: https://web.archive.org/web/*/{domain}")
        self.append_output(self.advanced_output, f"  • Archive.today: https://archive.today/{domain}")
        self.append_output(self.advanced_output, f"  • Google Cache: cache:{domain}")
        self.append_output(self.advanced_output, f"  • Bing Cache: Use Bing search with cache:{domain}")
        self.append_output(self.advanced_output, f"  • CommonCrawl: https://index.commoncrawl.org/")

        self.append_output(self.advanced_output, f"\n[+] HISTORICAL DATA TOOLS:")
        self.append_output(self.advanced_output, f"  • ViewDNS History: https://viewdns.info/iphistory/?domain={domain}")
        self.append_output(self.advanced_output, f"  • SecurityTrails: https://securitytrails.com/domain/{domain}/history/a")
        self.append_output(self.advanced_output, f"  • DNS History: https://completedns.com/dns-history/")

    def company_intelligence(self, company):
        """Company/Business intelligence gathering"""
        self.clear_output(self.advanced_output)
        self.append_output(self.advanced_output, f"{'='*100}\nCOMPANY INTELLIGENCE: {company}\n{'='*100}\n")

        self.append_output(self.advanced_output, f"\n[+] BUSINESS INFORMATION:")
        self.append_output(self.advanced_output, f"  • Crunchbase: https://crunchbase.com/organization/{company}")
        self.append_output(self.advanced_output, f"  • LinkedIn Company: https://linkedin.com/company/{company}")
        self.append_output(self.advanced_output, f"  • Bloomberg: https://bloomberg.com/profile/company/{company}")
        self.append_output(self.advanced_output, f"  • Glassdoor: https://glassdoor.com/Search/results.htm?keyword={company}")

        self.append_output(self.advanced_output, f"\n[+] FINANCIAL DATA:")
        self.append_output(self.advanced_output, f"  • Yahoo Finance: https://finance.yahoo.com/quote/{company}")
        self.append_output(self.advanced_output, f"  • SEC Edgar: https://sec.gov/cgi-bin/browse-edgar?company={company}")
        self.append_output(self.advanced_output, f"  • OpenCorporates: https://opencorporates.com/companies?q={company}")

        self.append_output(self.advanced_output, f"\n[+] EMPLOYEE DATA:")
        self.append_output(self.advanced_output, f"  • LinkedIn Search: site:linkedin.com \"{company}\"")
        self.append_output(self.advanced_output, f"  • Hunter.io: https://hunter.io/search/{company}")
        self.append_output(self.advanced_output, f"  • RocketReach: https://rocketreach.co/")

        self.append_output(self.advanced_output, f"\n[+] TECH STACK:")
        self.append_output(self.advanced_output, f"  • BuiltWith: https://builtwith.com/{company}")
        self.append_output(self.advanced_output, f"  • Wappalyzer: https://wappalyzer.com/")
        self.append_output(self.advanced_output, f"  • StackShare: https://stackshare.io/{company}")

    # ==================== UTILITY FUNCTIONS ====================

    def extract_domain(self, url):
        """Extract domain from URL"""
        if not url.startswith(('http://', 'https://')):
            # No scheme: strip any path component and return the host part
            return url.split('/')[0]
        parsed = urlparse(url)
        return parsed.netloc or parsed.path

    def export_json(self):
        """Export results to JSON"""
        if not self.results:
            messagebox.showwarning("No Results", "No results to export")
            return

        filepath = filedialog.asksaveasfilename(
            defaultextension=".json",
            filetypes=[("JSON files", "*.json"), ("All files", "*.*")]
        )

        if filepath:
            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    json.dump(self.results, f, indent=4, default=str)
                messagebox.showinfo("Success", f"Results exported to {filepath}")
            except Exception as e:
                messagebox.showerror("Error", f"Failed to export: {str(e)}")

    def export_txt(self):
        """Export results to TXT"""
        if not self.results:
            messagebox.showwarning("No Results", "No results to export")
            return

        filepath = filedialog.asksaveasfilename(
            defaultextension=".txt",
            filetypes=[("Text files", "*.txt"), ("All files", "*.*")]
        )

        if filepath:
            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    f.write("="*100 + "\n")
                    f.write("DARKSINT - OVERPOWERED OSINT PLATFORM\n")
                    f.write("="*100 + "\n")
                    f.write(f"Generated: {datetime.now()}\n")
                    f.write("="*100 + "\n\n")

                    for key, value in self.results.items():
                        f.write(f"\n{'='*100}\n")
                        f.write(f"{key.upper()}\n")
                        f.write(f"{'='*100}\n")
                        f.write(json.dumps(value, indent=2, default=str))
                        f.write("\n\n")

                messagebox.showinfo("Success", f"Results exported to {filepath}")
            except Exception as e:
                messagebox.showerror("Error", f"Failed to export: {str(e)}")

    def export_html(self):
        """Export results to HTML report"""
        if not self.results:
            messagebox.showwarning("No Results", "No results to export")
            return

        filepath = filedialog.asksaveasfilename(
            defaultextension=".html",
            filetypes=[("HTML files", "*.html"), ("All files", "*.*")]
        )

        if filepath:
            try:
                import html  # stdlib; escape result data before embedding in HTML
                with open(filepath, 'w', encoding='utf-8') as f:
                    f.write("""
<!DOCTYPE html>
<html>
<head>
<title>DARKSINT Report</title>
<style>
body { font-family: 'Courier New', monospace; background: #0a0a0a; color: #00ff00; padding: 20px; }
h1 { color: #00ff00; text-align: center; }
.section { background: #1a1a1a; padding: 20px; margin: 20px 0; border: 2px solid #00ff00; }
.section h2 { color: #00ff00; }
pre { background: #000; padding: 10px; overflow-x: auto; }
</style>
</head>
<body>
<h1>⚡ DARKSINT OSINT REPORT ⚡</h1>
<p style="text-align:center;">Generated: """ + str(datetime.now()) + """</p>
""")

                    for key, value in self.results.items():
                        f.write('<div class="section">')
                        f.write(f'<h2>{html.escape(key.upper())}</h2>')
                        f.write(f'<pre>{html.escape(json.dumps(value, indent=2, default=str))}</pre>')
                        f.write('</div>')

                    f.write('</body></html>')

                messagebox.showinfo("Success", f"HTML report exported to {filepath}")
            except Exception as e:
                messagebox.showerror("Error", f"Failed to export: {str(e)}")

    def clear_all(self):
        """Clear all results"""
        if messagebox.askyesno("Confirm", "Clear all results?"):
            self.results = {}
            for widget in [self.domain_output, self.email_output, self.username_output,
                           self.ip_output, self.phone_output, self.crypto_output,
                           self.leaks_output, self.advanced_output]:
                widget.delete(1.0, tk.END)
            self.update_status("ALL CLEARED")


def main():
    root = tk.Tk()
    app = DarkSINT(root)
    root.mainloop()


if __name__ == "__main__":
    main()