import os
import requests
from bs4 import BeautifulSoup as bs
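# Scrape the front page of 4chan's /wg/ (Wallpapers/General) board and
# download every image from its threads into ./4chan_wallpapers.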

# Create the wallpaper directory if it does not exist yet, then work inside
# it so wget saves the images there.
current_directory = os.path.join(os.getcwd(), "4chan_wallpapers")
if not os.path.isdir(current_directory):
	print("Creating wallpaper directory...")
	os.makedirs(current_directory)
print("Changing directory...")
os.chdir(current_directory)
files_in_directory = os.listdir(current_directory)


# Fetch the /wg/ board index; its HTML links to every thread on the front page.
address = "https://boards.4chan.org/wg/"
response = requests.get(address)

# Collect only anchors that actually carry an href, so thread["href"] below
# cannot raise a KeyError.
main = bs(response.content, 'html.parser')
threads = main.find_all("a", href=True)
for thread in threads:
	href = thread["href"]
	# Keep only direct thread links of the form "thread/12345"; links
	# containing "#" anchor to individual posts and would duplicate threads.
	if "thread/" in href and "#" not in href and len(href.split("/")) == 2:
		thread_link = "https://boards.4chan.org/wg/" + href
		print(thread_link)
		response_thread = requests.get(thread_link)
		website = bs(response_thread.content, 'html.parser')
		links = website.find_all("a", href=True)
		for link in links:
			if "4cdn.org" in link["href"]:
				# Image links are protocol-relative ("//i.4cdn.org/..."), so
				# drop the leading "//" and prepend "https://".
				image_link = "https://" + link["href"][2:]
				image_file = image_link.split("/")[-1]
				if image_file not in files_in_directory:
					files_in_directory.append(image_file)  # Trusting the download won't fail
					print("Fetching: " + image_link)
					# "&" backgrounds each wget so downloads run in parallel.
					# In the future, pass -nc to wget to skip already downloaded files.
					os.system("wget -q " + image_link + " &")
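

# A portable alternative to shelling out to wget (a sketch, not wired into
# the loop above): stream the image with requests and write it to disk in
# chunks. The parameter names mirror the image_link / image_file variables
# used in the loop.
def download_with_requests(image_link, image_file):
	with requests.get(image_link, stream=True) as image_response:
		image_response.raise_for_status()
		with open(image_file, "wb") as out:
			for chunk in image_response.iter_content(chunk_size=8192):
				out.write(chunk)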