Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Contents/Code/Parsing.py
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ def GetSearchResults(query=None,type=None,imdb_id=None):

if (match):
res.title = match.group(1).strip()
res.releasedate = match.group(2).strip()
res.year = int(match.group(2).strip())

# Extract out URL
res.id = item.a['href'][1:]
Expand Down
194 changes: 150 additions & 44 deletions Contents/Code/__init__.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Contents/DefaultPrefs.json
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
{
"id": "favourite_notify_email_test",
"type": "bool",
"label": "Send Test Email",
"label": "Show 'Send Test Email'",
"default": "false"
},
{
Expand Down
1 change: 1 addition & 0 deletions Contents/Libraries/Shared/FileLog.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
import sys
import os
from datetime import datetime


def FileLog(fn_name, msg):
    """Append a timestamped line to the plugin's debug log file.

    The log file (PLEX_LMWT_Favs.log) is created/appended next to the
    running script (sys.path[0]).

    fn_name -- name of the calling function; included in the line prefix.
    msg     -- message text to record (must be a string).
    """
    message = str(datetime.now()) + " - [" + str(fn_name) + "]: " + msg + "\n"
    # 'with' guarantees the handle is closed even if the write raises,
    # replacing the original manual f = None / try / finally bookkeeping.
    with open(sys.path[0] + os.sep + "PLEX_LMWT_Favs.log", 'a') as f:
        f.write(message)
Expand Down
2 changes: 1 addition & 1 deletion Contents/Services/URL/FileNuke/ServiceCode.pys
Original file line number Diff line number Diff line change
@@ -1 +1 @@
import re, string, urllib2from datetime import datefrom BeautifulSoup import BeautifulSoupUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22'def NormalizeURL(url): #Log("*********** In FileNuke normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(filenuke)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for FileNuke (' + url + ')') return VideoClipObject( title = 'FileNuke Redirect Page', summary = 'FileNuke Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for FileNuke (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ]@indirectdef PlayVideo(url): HTTP.Headers['User-Agent'] = USER_AGENT # Request Initial Provider page. try: #Log('Requesting ' + url) soup = BeautifulSoup(HTTP.Request(url).content) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements if present... try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: return LogProviderError("Error whilst extracting out form elemnts to navigate to 2nd page.",ex) # Navigate to 2nd page. 
try: contents = HTTP.Request(url, values=params, headers={ 'Referer': url }, cacheTime=0).content soup = BeautifulSoup(contents) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) # Extract out JS packed final video URL. script_elems = soup.find('div', { 'id': 'player_code' }).findAll('script') elems = None video_url = None for script_elem in script_elems: script = script_elem.string if script is None: continue #Log(script) # Look for substitution values. sub_vals = re.search("\d{2},'([^']*)'.split", script) if (sub_vals is None): continue elems = sub_vals.group(1).split('|') #Log(elems) # Look for url to substitute values into. url_re = re.search("([0-9a-z]://[0-9a-z]\.[0-9a-z]\.[0-9a-z]\.[0-9a-z]/[0-9a-z]/[0-9a-z]/[0-9a-z]\.[0-9a-z])", script) #Log(url_re.group(1)) if (url_re is None or url_re.group(1) is None): continue video_url = url_re.group(1) if (elems is None or video_url is None): return LogProviderError("Error whilst extracting out / depacking video URL elements", None) # Create dict to map url sub keys to sub values. alphadict = dict() for index_cnt in range(0, 2): index = index_cnt * len(string.digits + string.ascii_lowercase) strindex = str(index_cnt) if index_cnt > 0 else "" for cnt in range(0, len(string.digits + string.ascii_lowercase)): alphadict[strindex + (string.digits + string.ascii_lowercase)[cnt]] = cnt + index def SubElem(matchObj): val = elems[alphadict[matchObj.group(0)]] if (val == ""): val = matchObj.group(0) return val # Sub values into url to get final url. final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. oc.user_agent = USER_AGENT return oc def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return []
# FileNuke URL service for the LMWT Plex plugin (Python 2 / Plex plugin
# framework).  Framework names (Prefs, HTTP, Log, Callback, @indirect,
# VideoClipObject, MediaObject, PartObject, ObjectContainer) are injected by
# the Plex plugin runtime, not imported here.
import re, string, urllib2
from datetime import date
from BeautifulSoup import BeautifulSoup

# Desktop Safari UA string sent on all provider requests.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22'


def NormalizeURL(url):
    """Pass URLs through unchanged, except the plugin's synthetic
    "providerinfo" URL, which gets "&visible=true" appended when the user's
    show_filenuke preference is enabled."""
    #Log("*********** In FileNuke normalizeURL")
    # Deal with special providerInfo URL built up by plugin to return
    # info about this provider. For all other normal URLs, do nothing.
    if ("providerinfo" in url):
        # Extract out domain.
        match = re.search("(filenuke)", url.lower())
        if (match is None):
            return url
        try:
            show = Prefs["show_" + match.group(1)]
        except Exception, ex:
            # Preference missing/unreadable -> treat provider as hidden.
            show = False
        if (show):
            return url + "&visible=true"
        else:
            return url
    else:
        return url


def MetadataObjectForURL(url):
    """Return placeholder metadata; real metadata is supplied elsewhere."""
    #Log('In MetadataObjectForURL for FileNuke (' + url + ')')
    return VideoClipObject(
        title = 'FileNuke Redirect Page',
        summary = 'FileNuke Redirect Page',
        thumb = None,
    )


def MediaObjectsForURL(url):
    """Return a single MediaObject that defers resolution to PlayVideo."""
    #Log('In MediaObjectsForURL for FileNuke (' + url + ')')
    return [
        MediaObject(
            parts = [PartObject(key=Callback(PlayVideo, url=url))],
        )
    ]


@indirect
def PlayVideo(url):
    """Resolve a FileNuke page URL to the final video URL.

    Flow: fetch the landing page, re-submit its hidden form to reach the
    player page, then de-obfuscate the JS-packed video URL found in the
    player_code div.  Errors are routed through LogProviderError.
    """
    HTTP.Headers['User-Agent'] = USER_AGENT

    # Request Initial Provider page.
    try:
        #Log('Requesting ' + url)
        soup = BeautifulSoup(HTTP.Request(url).content)
    except Exception, ex:
        return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex)

    # Extract out these form elements if present...
    try:
        formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login']
        params = {}
        for formElem in formElems:
            formElemVal = soup.find('input', {'name' : formElem })['value']
            params[formElem] = formElemVal
    except Exception, ex:
        return LogProviderError("Error whilst extracting out form elemnts to navigate to 2nd page.",ex)

    # Navigate to 2nd page.
    try:
        contents = HTTP.Request(url, values=params, headers={ 'Referer': url }, cacheTime=0).content
        soup = BeautifulSoup(contents)
    except Exception, ex:
        return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex)

    # Extract out JS packed final video URL.
    script_elems = soup.find('div', { 'id': 'player_code' }).findAll('script')
    elems = None
    video_url = None
    for script_elem in script_elems:
        script = script_elem.string
        if script is None:
            continue
        #Log(script)
        # Look for substitution values.
        sub_vals = re.search("\d{2},'([^']*)'.split", script)
        if (sub_vals is None):
            continue
        elems = sub_vals.group(1).split('|')
        #Log(elems)
        # Look for url to substitute values into.
        url_re = re.search("([0-9a-z]://[0-9a-z]\.[0-9a-z]\.[0-9a-z]\.[0-9a-z]/[0-9a-z]/[0-9a-z]/[0-9a-z]\.[0-9a-z])", script)
        #Log(url_re.group(1))
        if (url_re is None or url_re.group(1) is None):
            continue
        video_url = url_re.group(1)

    if (elems is None or video_url is None):
        return LogProviderError("Error whilst extracting out / depacking video URL elements", None)

    # Create dict to map url sub keys to sub values.
    # Keys are base-36-style tokens "0".."z", "10".."1z"; values are indexes
    # into elems (the '|'-separated substitution list from the packed JS).
    alphadict = dict()
    for index_cnt in range(0, 2):
        index = index_cnt * len(string.digits + string.ascii_lowercase)
        strindex = str(index_cnt) if index_cnt > 0 else ""
        for cnt in range(0, len(string.digits + string.ascii_lowercase)):
            alphadict[strindex + (string.digits + string.ascii_lowercase)[cnt]] = cnt + index

    def SubElem(matchObj):
        # Replace a token with its substitution value; an empty value means
        # the token stands for itself.
        val = elems[alphadict[matchObj.group(0)]]
        if (val == ""):
            val = matchObj.group(0)
        return val

    # Sub values into url to get final url.
    final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url)
    Log(final_url)

    oc = ObjectContainer(
        objects = [
            VideoClipObject(
                items = [
                    MediaObject(
                        parts = [PartObject(key=final_url)]
                    )
                ]
            )
        ]
    )

    # Might as well set a sensible user agent string.
    oc.user_agent = USER_AGENT
    return oc


def LogProviderError(msg="", ex=None):
    """Log a provider error, then raise so the framework surfaces failure.

    NOTE(review): the raise makes the trailing 'return []' unreachable; it
    appears kept for callers written against the old non-raising contract.
    """
    Log("************************** PROVIDER ERROR: " + msg)
    raise Exception(msg)
    return []
Expand Down
2 changes: 1 addition & 1 deletion Contents/Services/URL/LMWT/ServiceCode.pys
Original file line number Diff line number Diff line change
@@ -1 +1 @@
import re, urlparse, cgi, base64, urllib2USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22'PLEX_URL = "http://127.0.0.1:32400"PLUGIN_URL = PLEX_URL + "/video/lmwt"def NormalizeURL(url): return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for LMWT (' + url + ')') video = None # Plugin should have access to info about this URL if user used plugin to launch video. # Bad things can happen here. Still want to run rest of code if possible though... try: request = urllib2.Request(PLUGIN_URL + "/mediainfo/%s" % String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) #Log(mediainfo) video = VideoClipObject( title=mediainfo['title'], summary=mediainfo['summary'], art=mediainfo['background'], thumb= mediainfo['poster'], rating = float(mediainfo['rating']), duration=mediainfo['duration'], year=mediainfo['year'], originally_available_at= ( date.fromordinal(mediainfo['release_date']) if ('release_date' in mediainfo and mediainfo['release_date']) else None ), genres=mediainfo['genres'], ) except Exception, ex: Log(ex) #Log(video) if video is None: # Return bare minimum. This is never shown to users. video = VideoClipObject( title = 'LMWT Redirect Page', summary = 'LMWT Redirect Page', thumb = None, ) return videodef MediaObjectsForURL(url): #Log('In MediaObjectsForURL for LMWT') ret = [] ret.append( MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ) return ret@indirectdef PlayVideo(url): # Extract out and break down query string of the LMWT Provider URL... 
lmwt_qs_args = cgi.parse_qs(urlparse.urlparse(url).query) # Extract out provider URL provider_url = base64.b64decode(lmwt_qs_args['url'][0]) media_objects = URLService.MediaObjectsForURL(provider_url) if (len(media_objects) > 0): PlaybackStarted(url=url) return ObjectContainer( objects = [ VideoClipObject( items = media_objects ) ] ) def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] ################################################################################################### LMWT Plugin specific helper methods.def PlaybackStarted(url): # Bad things can happen here. Let's try to be neat though.... try: caller = "lmwt" # We may be playing the video on behalf of another plugin. In that case, we'll need to # call that plugin's PlaybackStarted method insted of our own. # # Check if this is the case by seeing who originally called for the source listing. try: request = urllib2.Request( PLUGIN_URL + "/playback/caller/" + String.Encode(url) ) caller = JSON.ObjectFromString(urllib2.urlopen(request).read())['caller'] except Exception, ex: pass # Get the media info object that was built by this plugin when generating the # source listing. We'll use the info in that to talk to whatever plugin we need to # tell the item has started playing. request = urllib2.Request(PLUGIN_URL + "/mediainfo/" + String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) # Use the information from the mediainfo to call the PlaybackStarted method of # whatever plugin requested this. url = PLEX_URL + '/video/' + caller + "/playback/%s" % mediainfo['id'] if (mediainfo['ep_num']): url += "/" + str(mediainfo['season']) + "/" + str(mediainfo['ep_num']) Log(url) request = urllib2.Request(url) response = urllib2.urlopen(request) except Exception, ex: Log.Exception("Error whilst trying to mark item as played") pass
# LMWT URL service for the LMWT Plex plugin (Python 2 / Plex plugin
# framework).  Framework names (Log, String, JSON, Callback, @indirect,
# URLService, VideoClipObject, MediaObject, PartObject, ObjectContainer)
# are injected by the Plex plugin runtime, not imported here.
import re
import urlparse
import cgi
import base64
import urllib2
from datetime import date

# Desktop Safari UA string (used by the plugin's requests).
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22'
# Local Plex server; the LMWT channel plugin is reached through it.
PLEX_URL = "http://127.0.0.1:32400"
PLUGIN_URL = PLEX_URL + "/video/lmwt"


def NormalizeURL(url):
    """LMWT URLs need no normalisation; return unchanged."""
    return url


def MetadataObjectForURL(url):
    """Build metadata for an LMWT URL by asking the plugin's /mediainfo
    endpoint; fall back to a bare placeholder if that fails."""
    Log('In MetadataObjectForURL for LMWT (' + url + ')')
    video = None
    # Plugin should have access to info about this URL if user used plugin to launch video.
    # Bad things can happen here. Still want to run rest of code if possible though...
    try:
        request = urllib2.Request(PLUGIN_URL + "/mediainfo/%s" % String.Encode(url))
        mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read())
        #Log(mediainfo)
        video = VideoClipObject(
            title=mediainfo['title'],
            summary=mediainfo['summary'],
            art=mediainfo['background'],
            thumb= mediainfo['poster'],
            rating = float(mediainfo['rating']),
            duration=mediainfo['duration'],
            year=mediainfo['year'],
            # release_date is stored as an ordinal day number (see
            # date.fromordinal); absent/falsy means unknown.
            originally_available_at= (
                date.fromordinal(mediainfo['release_date'])
                if ('release_date' in mediainfo and mediainfo['release_date'])
                else None
            ),
            genres=mediainfo['genres'],
        )
    except Exception, ex:
        Log(ex)
    #Log(video)
    if video is None:
        # Return bare minimum. This is never shown to users.
        video = VideoClipObject(
            title = 'LMWT Redirect Page',
            summary = 'LMWT Redirect Page',
            thumb = None,
        )
    return video


def MediaObjectsForURL(url):
    """Return a single MediaObject that defers resolution to PlayVideo."""
    #Log('In MediaObjectsForURL for LMWT')
    ret = []
    ret.append(
        MediaObject(
            parts = [PartObject(key=Callback(PlayVideo, url=url))],
        )
    )
    return ret


@indirect
def PlayVideo(url):
    """Decode the wrapped provider URL from the LMWT URL's query string,
    resolve it via the matching URL service, and report playback started.

    NOTE(review): implicitly returns None when the provider yields no media
    objects — confirm the framework tolerates that.
    """
    # Extract out and break down query string of the LMWT Provider URL...
    lmwt_qs_args = cgi.parse_qs(urlparse.urlparse(url).query)
    # Extract out provider URL (base64-encoded in the 'url' query arg).
    provider_url = base64.b64decode(lmwt_qs_args['url'][0])
    media_objects = URLService.MediaObjectsForURL(provider_url)
    if (len(media_objects) > 0):
        PlaybackStarted(url=url)
        return ObjectContainer(
            objects = [
                VideoClipObject(
                    items = media_objects
                )
            ]
        )


##################################################################################################
# LMWT Plugin specific helper methods.
def PlaybackStarted(url):
    """Best-effort notification to the owning plugin that playback began.

    Never raises: every failure is logged and swallowed so playback is not
    interrupted by bookkeeping errors.
    """
    # Bad things can happen here. Let's try to be neat though....
    try:
        caller = "lmwt"
        # We may be playing the video on behalf of another plugin. In that case, we'll need to
        # call that plugin's PlaybackStarted method insted of our own.
        #
        # Check if this is the case by seeing who originally called for the source listing.
        try:
            request = urllib2.Request(
                PLUGIN_URL + "/playback/caller/" + String.Encode(url)
            )
            response = JSON.ObjectFromString(urllib2.urlopen(request).read())
            if (response['caller']):
                caller = response['caller']
        except Exception, ex:
            # No recorded caller -> default to "lmwt" itself.
            pass

        # Get the media info object that was built by this plugin when generating the
        # source listing. We'll use the info in that to talk to whatever plugin we need to
        # tell the item has started playing.
        request = urllib2.Request(PLUGIN_URL + "/mediainfo/" + String.Encode(url))
        mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read())

        # Use the information from the mediainfo to call the PlaybackStarted method of
        # whatever plugin requested this.
        url = PLEX_URL + '/video/' + caller + "/playback/%s" % mediainfo['id']
        if (mediainfo['ep_num']):
            url += "/" + str(mediainfo['season']) + "/" + str(mediainfo['ep_num'])
        Log(url)
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
    except Exception, ex:
        Log.Exception("Error whilst trying to mark item as played")
        pass
Expand Down
Loading