if domain notin unique_domains: unique_domains.add(domain) filtered_urls.append(url) # 输出结果 for filtered_url in filtered_urls: print(filtered_url)
def extract_success_urls(file_path):
    """Extract every URL tagged ``[SUCCESS]`` from a log file.

    Reads *file_path* line by line, collects all URLs that follow a
    ``[SUCCESS]`` marker, and hands the collected list to
    ``filter_unique_domain_urls`` (defined elsewhere in this module),
    which deduplicates by domain and prints the result.

    Args:
        file_path: Path to the UTF-8 encoded log file to scan.

    Returns:
        None. Output is produced as a side effect by
        ``filter_unique_domain_urls``.
    """
    # Hoist the pattern out of the per-line loop; a URL is any run of
    # characters after "[SUCCESS]" that contains no whitespace or brackets.
    success_pattern = re.compile(r'\[SUCCESS\]\s*(https?://[^\s\[\]]+)')

    success_urls = []
    # Iterate the file object directly instead of readlines() so the whole
    # file is never held in memory at once.
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            success_urls.extend(success_pattern.findall(line))

    filter_unique_domain_urls(success_urls)