@@ -26,7 +26,7 @@ def whois_gather(short_domain):
         logging.info('WHOIS INFO GATHERING: OK')
         w = whois.whois(short_domain)
         if w.org is None:
-            w['org'] = 'n/a'
+            w['org'] = 'Organization name was not extracted'
         logging.info('WHOIS INFO GATHERING: OK')
         return w
     except Exception as e:
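A minimal standalone sketch of the fallback this hunk introduces, assuming the `python-whois` package (imported as `whois`, as in the surrounding code); `example.com` is an illustrative placeholder:

```python
import whois  # python-whois: whois.whois() returns a dict-like WhoisEntry

w = whois.whois('example.com')
# WhoisEntry supports both attribute reads and item assignment,
# so a missing 'org' field can be replaced in place with the
# fallback string used in the patch above.
if w.org is None:
    w['org'] = 'Organization name was not extracted'
print(w.org)
```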
@@ -110,7 +110,7 @@ def sm_gather(url):
     links = [a['href'] for a in soup.find_all('a', href=True)]
     categorized_links = {'Facebook': [], 'Twitter': [], 'Instagram': [],
                          'Telegram': [], 'TikTok': [], 'LinkedIn': [],
-                         'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': []}
+                         'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': [], 'X.com': []}

     for link in links:
         parsed_url = urlparse(link)
@@ -135,6 +135,8 @@ def sm_gather(url):
             categorized_links['WeChat'].append(urllib.parse.unquote(link))
         elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
             categorized_links['Odnoklassniki'].append(urllib.parse.unquote(link))
+        elif hostname and (hostname == 'x.com' or hostname.endswith('.x.com')):
+            categorized_links['X.com'].append(urllib.parse.unquote(link))

     if not categorized_links['Odnoklassniki']:
         categorized_links['Odnoklassniki'].append('Odnoklassniki links were not found')
@@ -156,6 +158,8 @@ def sm_gather(url):
         categorized_links['Twitter'].append('Twitter links were not found')
     if not categorized_links['Facebook']:
         categorized_links['Facebook'].append('Facebook links were not found')
+    if not categorized_links['X.com']:
+        categorized_links['X.com'].append('X.com links were not found')

     return categorized_links
@@ -209,7 +213,7 @@ def domains_reverse_research(subdomains, report_file_type):
     subdomain_socials_grouped = list(dict(subdomain_socials_grouped).values())

     sd_socials = {'Facebook': [], 'Twitter': [], 'Instagram': [], 'Telegram': [], 'TikTok': [], 'LinkedIn': [],
-                  'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': []}
+                  'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': [], 'X.com': []}

     for inner_list in subdomain_socials_grouped:
         for link in inner_list:
@@ -234,6 +238,8 @@ def domains_reverse_research(subdomains, report_file_type):
                 sd_socials['WeChat'].append(urllib.parse.unquote(link))
             elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
                 sd_socials['Odnoklassniki'].append(urllib.parse.unquote(link))
+            elif hostname and (hostname == 'x.com' or hostname.endswith('.x.com')):
+                sd_socials['X.com'].append(urllib.parse.unquote(link))

     sd_socials = {k: list(set(v)) for k, v in sd_socials.items()}

@@ -242,7 +248,7 @@ def domains_reverse_research(subdomains, report_file_type):
     if not subdomain_ip:
         subdomain_ip = ["No subdomains IP's were found"]

-    if report_file_type == 'pdf' or report_file_type == 'html':
+    if report_file_type == 'html':
         return subdomain_mails, sd_socials, subdomain_ip
     elif report_file_type == 'xlsx':
         return subdomain_urls, subdomain_mails, subdomain_ip, sd_socials
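A minimal, self-contained sketch of the hostname check these hunks add (the helper name `is_x_link` and the sample URLs are illustrative, not part of the patch); the `== 'x.com' or endswith('.x.com')` pair matches the domain and its subdomains while rejecting look-alike hosts such as `notx.com`:

```python
from urllib.parse import urlparse, unquote

def is_x_link(link):
    # hostname is None for relative or malformed links, hence the guard.
    hostname = urlparse(link).hostname
    return bool(hostname) and (hostname == 'x.com' or hostname.endswith('.x.com'))

links = ['https://x.com/someuser', 'https://mobile.x.com/home',
         'https://notx.com/page', '/relative/path']
print([unquote(l) for l in links if is_x_link(l)])
# ['https://x.com/someuser', 'https://mobile.x.com/home']
```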