
fix for manga download

Phil, 3 years ago
parent revision f71c9df612
5 changed files with 8 additions and 9 deletions
  1. auth.py (+3 -3)
  2. main.py (+3 -1)
  3. parsers/chapter.py (+1 -1)
  4. parsers/episode.py (+0 -3)
  5. settings.cfg (+1 -1)

auth.py (+3 -3)

@@ -9,7 +9,7 @@ def checkLogin():
     #f = open('cookies','rb')
     sess = requests.session()
     #sess.cookies.update(pickle.load(f))
-    response = sess.get('https://proxer.net/ucp')
+    response = sess.get('https://proxer.me/ucp')
     data = response.content
     dump = open('dummys/ucp_loggedout.html','w')
     dump.write(str(data))
@@ -19,7 +19,7 @@ def checkLogin():
 def login(user,passw):
     sess = requests.Session()
     #obtain token
-    response = requests.get('https://proxer.net/')
+    response = requests.get('https://proxer.me/')
     data = response.content
     tparse = tokenParser()
     tparse.feed(str(data))
@@ -28,7 +28,7 @@ def login(user,passw):
     cookies = response.cookies
 
     #make auth
-    response = sess.post('https://proxer.net/login?' + token + '=1',
+    response = sess.post('https://proxer.me/login?' + token + '=1',
         data={'username':user,'password':passw,'remember':'1','submit':'login'},
         cookies=cookies)
     cookies = response.cookies

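A note on the auth.py hunks: the fix swaps the hard-coded proxer.net host for proxer.me, but login() still fetches the token with module-level requests.get() instead of the freshly created session, so cookies have to be threaded through by hand. A minimal sketch of the same flow driven by a single Session (the import path and the tparse.token attribute are assumptions; neither is shown in this diff):

    import requests
    from parsers.token import tokenParser  # hypothetical import path

    def login(user, passw):
        sess = requests.Session()
        # fetch the landing page through the session so it records cookies itself
        response = sess.get('https://proxer.me/')
        tparse = tokenParser()
        tparse.feed(str(response.content))
        token = tparse.token  # assumption: the parser stores the hidden field's name
        # post credentials; the token is sent as <token>=1 in the query string
        sess.post('https://proxer.me/login?' + token + '=1',
                  data={'username': user, 'password': passw,
                        'remember': '1', 'submit': 'login'})
        return sess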
main.py (+3 -1)

@@ -89,7 +89,9 @@ def chapter(sess,ep):
         if uin == '1':
             print(path)
             for p in tqdm(chapPars.images):
-                call(['curl',chapPars.serverurl + p['file'],'-o',path+str(count).zfill(2)+p['file'][p['file'].rindex('.'):]])
+                # prepend https:// because proxer now returns serverurl without a scheme?
+                call(['curl','https://'+chapPars.serverurl + p['file'],'-o',path+str(count).zfill(2)+p['file'][p['file'].rindex('.'):]])
+                #call(['curl',chapPars.serverurl + p['file'],'-o',path+str(count).zfill(2)+p['file'][p['file'].rindex('.'):]])
                 count +=1
 
         if uin == '2':

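The main.py fix unconditionally prepends https:// to serverurl, which would produce https://https://... if proxer ever starts returning a full URL again. A minimal scheme-aware sketch (with_scheme is a hypothetical helper, not part of this commit):

    def with_scheme(url):
        # prepend https:// only when the server URL comes back scheme-less,
        # also covering protocol-relative values such as //cdn.example/
        if url.startswith('http://') or url.startswith('https://'):
            return url
        return 'https://' + url.lstrip('/')

Used in the loop above, the download would become call(['curl', with_scheme(chapPars.serverurl) + p['file'], ...]).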
parsers/chapter.py (+1 -1)

@@ -18,7 +18,7 @@ class chapterParser(HTMLParser):
             if 'var serverurl' in data:
                 self.serverurl = data[data.index('serverurl')+16:len(data)-5]
                 self.serverurl = self.serverurl.replace('\\','').replace(';','')
-                #print("serverurl " + self.serverurl)
+                print("serverurl " + self.serverurl)
                 arrString = data[data.index('var pages')+ 14:]
                 file = ""
                 tempStr = ""

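chapterParser recovers serverurl from inline JavaScript with fixed offsets (data.index('serverurl') + 16), which breaks as soon as the page's spacing changes; this commit also re-enables the debug print next to that spot. A minimal regex-based sketch of the same extraction, assuming the script still contains a var serverurl = "..."; statement (parse_serverurl is a hypothetical helper):

    import re

    def parse_serverurl(script_text):
        # match: var serverurl = "..."; regardless of quoting or whitespace
        m = re.search(r"var\s+serverurl\s*=\s*['\"]([^'\"]+)['\"]", script_text)
        return m.group(1).replace('\\', '') if m else None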
parsers/episode.py (+0 -3)

@@ -22,7 +22,6 @@ class episodeParser(HTMLParser):
             lines = data.replace('\\n','').split(';')
             streams = lines[0][lines[0].index('['):]
             streams = streams.split('}')
-            print(streams[0])
             
             typeIndex = streams[0].index('type') + 7
             if streams[0][typeIndex:typeIndex + 13] == 'proxer-stream':
@@ -31,11 +30,9 @@ class episodeParser(HTMLParser):
                 code = streams[0][codeStart:]
                 code = code[:code.index('"')]
                 self.code = code
-                print(code)
                 urlStart = streams[0].index('replace') + 10
                 url = streams[0][urlStart:streams[0].index('.html') + 5]
                 self.url = url.replace("\\\\","")
-                print("url: " + url)
 
     def handle_endtag(self, tag):
         if tag == 'script' and self.inScript:

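The episode.py hunks delete ad-hoc print() debugging while the chapter.py hunk above un-comments one; routing such output through the standard logging module would let it be toggled in one place instead of by editing parsers. A minimal sketch (the logger name is an assumption):

    import logging

    log = logging.getLogger('proxer.parsers')  # hypothetical logger name

    # inside the parsers, instead of print(streams[0]) or print("serverurl " + ...):
    #     log.debug('streams[0]: %s', streams[0])
    #     log.debug('serverurl %s', self.serverurl)
    # then enable all parser debug output at once, e.g. from main.py:
    logging.basicConfig(level=logging.DEBUG)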
settings.cfg (+1 -1)

@@ -1,5 +1,5 @@
 [paths]
-base_url:https://proxer.net
+base_url:https://proxer.me
 anime_path:./anime
 manga_path:./manga
 cookie_jar:./cookies
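
settings.cfg now points base_url at the new domain. The commit does not show how the file is loaded; a minimal sketch with the standard library's configparser, whose default delimiters already accept the key:value style used here:

    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('settings.cfg')
    base_url = cfg['paths']['base_url']      # 'https://proxer.me'
    manga_path = cfg['paths']['manga_path']  # './manga'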