diff --git a/Contents/Code/Parsing.py b/Contents/Code/Parsing.py index 1baf4bb..b68a777 100644 --- a/Contents/Code/Parsing.py +++ b/Contents/Code/Parsing.py @@ -372,7 +372,7 @@ def GetSearchResults(query=None,type=None,imdb_id=None): if (match): res.title = match.group(1).strip() - res.releasedate = match.group(2).strip() + res.year = int(match.group(2).strip()) # Extract out URL res.id = item.a['href'][1:] diff --git a/Contents/Code/__init__.py b/Contents/Code/__init__.py index 5b11c9b..e0b7481 100644 --- a/Contents/Code/__init__.py +++ b/Contents/Code/__init__.py @@ -25,6 +25,8 @@ from RecentItems import BrowsedItems, ViewedItems from Favourites import FavouriteItems +import FileLog + cerealizer.register(MediaInfo) VIDEO_PREFIX = Site.VIDEO_PREFIX @@ -59,6 +61,7 @@ WATCHED_ITEMS_KEY = "USER_VIEWING_HISTORY" FAVOURITE_ITEMS_KEY = "FAVOURITE_ITEMS" ADDITIONAL_SOURCES_KEY = "ADDITIONAL_SOURCES" +LAST_USAGE_TIME_KEY = "LAST_USAGE_TIME" USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' @@ -97,10 +100,14 @@ def Start(): if hasattr(Site, 'Start'): Site.Start() - - if (Prefs['versiontracking'] == True): - Thread.Create(VersionTrack) + # Assign default values for stuff we may need. + if (not Dict[LAST_USAGE_TIME_KEY]): + Dict[LAST_USAGE_TIME_KEY] = datetime(1900,1,1) + + # Do a bit of housekeeping... See if any plugins that support our additional + # sources functionality are present on this sytem, start a new item check in + # favourites and log version usage. 
Thread.Create(StartFavouritesCheck) Thread.Create(CheckAdditionalSources, sources=Site.ADDITIONAL_SOURCES) @@ -111,25 +118,9 @@ def Start(): def ValidatePrefs(): if (Prefs['favourite_notify_email']): - Utils.add_favourites_cron(Platform.OS, NAME, VIDEO_PREFIX) - - if (Prefs['favourite_notify_email_test']): - try: - Notifier.notify( - Prefs[ "favourite_notify_email"], - str(NAME), - "TEST", - "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBhIQERUQEBAWEA8QEBQQEBAXFA8QGBAQFRAVFRUQEhQYGyYeFxkjGRIUHy8sJCcpLCwsFR4xNTAqNSYrLCkBCQoKBQUFDQUFDSkYEhgpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKf/AABEIAMIBAwMBIgACEQEDEQH/xAAbAAEAAwEBAQEAAAAAAAAAAAAABQYHBAMCAf/EAEgQAAIBAQIGDQoDBwMFAAAAAAABAgMEEQUWITFRkwYSMjRBU1RhcXSR0dIHEyJSgaGxs8HDQnKyIzOCkqLh8BVicxRDY8Li/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AKMAa1gLAVmlZqEpWalKUqFJyk6VNtt04tttrKwMlBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7ig+UOxU6VenGlTjTi6F7UIxgm/OTV7SWe5ICqgAAbRsd3pZ+r0vlxMXNo2O70s/V6Xy4gSAAAAAAAAAAAAHJbcKQpZG75eqs/t0AdZ51rTGG7ko9LSK5asOVJ5E9pHQs/tkcDd+V5XpAstTD1JZm5dCf1uOeWyWPBTb6Wl3kFGLeZX+894YOqvNTl2NfECUxl/wDF/V/8npDZJDhhJfysiv8ASq3Fy93eeVSxVI56cl/CwLHSw1Rl+Pa/mTXvzHbCaavTTWlNMpJ90q8oO+MnF8zuAugICybIZLJUW2XrLI+zMyas9qjUV8JXr4dK4APUAAAAAAAAAADOPKdvil1f7szRzOPKdvil1f7swKcAABtGx3eln6vS+XExc2jY7vSz9XpfLiBIAAAAAAAAH43dleRLO9AbuyvIlnegreFcKuo9rHJTX9T0vmA98I4dbvjSyLhnwv8ALoIZs+6NFzajFXyeZFiwdgWNP0p3Tn7o9GkCJseBqlTLuI6XwrmRMWfAdKGdbd6Xm7CQAHzCmo5IpRWhJL4H0AAAAHlWssJ7qCl0pfEjbTseg8tN7V6HlXeiXAFPtVhnSd043LgedP2nxRryg9tFuL/zI9JcpwUlc1ennTy3kFhLAe1vnSyrO4cK/LpA7cG4YVX0ZejU90ujn5iRKQmWLA+Ftv8As5v0/wAL9ZaOkCVAAAAAAAAM48p2+KXV/
uzNHM48p2+KXV/uzApwAAG0bHd6Wfq9L5cTFzaNju9LP1el8uIEgAAAAAAHLhO2eapuS3TyR6dPszgRmHcI3vzUXkW7el+qRFGk5yUYq9vIkfDenhzlkwLg/wA3HbyXpzX8seBAdGDsHRox0ze6lp5lzHWAAAAAA+J1oxzyS6WkB9g8P+up8ZH+ZHrCrGW5kpdDTA+gAAAAENhjBN99SmsueUdP+5c5BJ3ZVkaypl2K5hvB+0lt4r0JPKvVlo6GBK4Kwh52GXdxyS59EjuKhYbW6U1NZs0lpi86LdGSaTWVNXp6UB+gAAAABnHlO3xS6v8AdmaOZx5Tt8Uur/dmBTgAANo2O70s/V6Xy4mLm0bHd6Wfq9L5cQJAAAAAAK1h21baptVmhk/i4e72FirVdrFyeaKb7EUycm2287d76WB24HsfnKiv3MfSlz6F2lpI3ANn2tLbcM3f7FkX17SSAAAAfNSVyb0JvsR9HxW3Mvyy+DAqlbCVWe6m7tC9FdiOY97JYJ1dxG9cMsyXtJSlsb9ap7Er/ewIQJ3ZVkenMTdXY3k9Cpe9DV3vRD1aTg3GSuadzQEjYcOThknfOGn8S6HwlhhNSSad6avT0opRYNjlduEoP8LTXRK/J2r3gS4AAHlarOqkHB5pLsfA+09QBSqlNxbi8ji7n0osGx+1baDg88Hk/K83vvOHZBZ9rUU1mmv6lkfuuPLAlfa1lolfF+3N70gLQAAAAAGceU7fFLq/3ZmjmceU7fFLq/3ZgU4AADaNju9LP1el8uJi5tGx3eln6vS+XECQAAAAAcGHKl1GX+5qPvv+hV7iw7I5fs4rTP4RfeQljjfUgtM4/qQFuo09rFRX4YpdiPsAAAAAAAJAAAVnDzXnnd6sb+m7uuJW34ZhTV0Wpz0LKl0srdSo5Nybvbd7fOB8k7sahknLgbil7L39UQtKk5NRir5PMi2WCyKlBQ4c7emTzsDoAAAAARmyClfSv9WSfseT6ortOe1aks6afY7y1YVjfRn+W/saf0KmBd078unKDxscr6cHphH9KPYAAABnHlO3xS6v92Zo5nHlO3xS6v8AdmBTgAANo2O70s/V6Xy4mLm0bHd6Wfq9L5cQJAAAAABD7JF6EPzP9JEYPf7Wn/yR+KJvZFC+knomvemu4r9Ge1lF6JJ9jAugAAAAAAABWsN2iXnZR2z2quuje7tyuAspVsN/vpez9KA4TvsOB51UpZIwf4nlvy3ZEcF5asC/uIfxfrkB6WLB8KS9FXt55PO+5HSAAAAAAAc2En+xqfkl8Colqw1O6jLnuj2yX9yqgXCwL9lD/jj+lHufFCF0YrRFLsR9gAAAM48p2+KXV/uzNHM48p2+KXV/uzApwAAG0bHd6Wfq9L5cTFzaNju9LP1el8uIEgAAAAA5cKUdtSmuHa3rpWX6FSLuU62Wfzc5Q0PJ0cD7ALTg+tt6UZcO1ufSsj+B0EJsctW6pv8ANH6r4E2AAAAAADjq2uhe1KUNsnc71e71k0HYVDCH72p/yS/UwLB/1dm00+z+x1WatCS/ZtOKd2TMnnu95TSxbHP3b/O/0oCVAAAAAAABDbJK3oxhpbk+hZF8fcQ9io7epGOmSv6L8vuPbCtq85Vk1uV6MehcPbedWx2z3zc+CKuXS/7X9oFhAAAAADOPKdvil1f7szRzOPKdvil1f7swKcAABtGx3eln6vS+XExc2jY7vSz9XpfLiBIAAAAABCbIrJmqrg9GX0f0Js+atJSi4yyqSuYFPs1d05Kazxd/SuFFvoVlOKlHKpK9dxUrZZXSm4PgzPSuBnZgbCXm3tJP0JPP6stPQBZAAAAAA4qmBqUm5OLvk236TztnaAI//QqPqv8AmZ1WWyRpLawVybvzt5f8R7AAAAAAAHBhm2+bhct3PIuZcLOu0WiNOLlJ3Je/QlzlTtlrdWbm/YtC4EB4XFtwZZPNU1H8T9KXS/8ALvYQ+ArBt5eckvRg8nPL+xYgAAAAAAZx5
Tt8Uur/AHZmjmceU7fFLq/3ZgU4AADaNju9LP1el8uJi5tGx3eln6vS+XECQAAAAAAABxYUweq0cmScdy//AFfMVecHFtNXNZGtBdSPwpgpVVto5Ki4fW5mBwYJwxtbqdR+jmjL1eZ8xPplKqUnFuMlc1nR2WDC06WTdQ9V8H5XwAWkHNZMIQqr0Xl4YvI17OE6QAAAAAAAAB5Wm0xpx203cvi9C5zjtuG4U8kfTlzZl0vuK/arXKo9tN36FwLmSA9cI4RlWle8kVuY6Od8582CxSqy2qzZ5S0LvFhsEqsro5Et1LgX9y0WSyRpR2sV0vhb0sD7o0VCKjFXRSuR9gAAAAAAAzjynb4pdX+7M0czjynb4pdX+7MCnAAAbRsd3pZ+r0vlxMXNo2O70s/V6Xy4gSAAAAAAAAAAA5rbg+FVXSWVZpLOu9FdtuCp0srW2j6y+q4C1gCkJnfZ8N1YZG9utEsvvzkza8C055btpLTHJ2rMRdfY/UjuWpr+V9j7wOulski91BroakvfcdMMOUX+JrpjIrtWx1I7qEl7H8TxAtX+s0eMXZPuPOeH6KzNy6I95Wbz9UW8yv8AeBNVtknqU/bJ/Rd5G2nCNSpupZPVWRdizn7SwZVlmpvpfo/E77Pscf8A3Jpc0cr7WBDJaCWsOAZS9Kp6MfV4X06CZstghS3Ebn6zyt+06APilRUFtYq5LMkfYAAAAAAAAAAzjynb4pdX+7M0czjynb4pdX+7MCnAAAbRsd3pZ+r0vlxMXJ2zbNrZThGnCpFQhFQivN0ndGKuSvay5EBrYMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqx+OCedJ9KTMqx+tvGx1VHwjH628bHVUfCBqfmI+rHsifSV2bJ7jKsfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasZx5Tt8Uur/dmcGP1t42Oqo+Ei8LYZq2qSnWkpSjHaJqMYeje3ddFaWwOEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH//Z" - ) - - return MessageContainer( - "A test email has been sent to: " + Prefs[ "favourite_notify_email"], - "This will most probably fail to arrive in your inbox.\nCheck the wiki for troubleshooting information" - ) - - except Exception, ex: - pass - + # Enable cron if we have favourites which are already being checked. 
+ if (len([x for x in load_favourite_items().get() if x.new_item_check]) > 0): + Utils.add_favourites_cron(Platform.OS, NAME, VIDEO_PREFIX) else: Utils.del_favourites_cron(Platform.OS, NAME, VIDEO_PREFIX) @@ -197,6 +188,16 @@ def VideoMainMenu(): ) ) + if (Prefs['favourite_notify_email_test']): + oc.add( + DirectoryObject( + key=Callback(TestEmailMenu), + title="Send Test Email ", + summary="Send a test email to the email address specified in the preferences", + ) + ) + + # Get latest version number of plugin. try: @@ -222,9 +223,47 @@ def VideoMainMenu(): except Exception, ex: Log("******** Error retrieving and processing latest version information. Exception is:\n" + str(ex)) - + + # This is a user requested menu which the user must go through when using / launching + # the plugin. If it's been more than 3 hours since we last saw the user, assume + # it's a new session and add it to the stats if needed. + if (datetime.utcnow() - Dict[LAST_USAGE_TIME_KEY]) > timedelta(hours=3): + Thread.Create(VersionTrack) + return oc + +#################################################################################################### + +def TestEmailMenu(): + + if (not Prefs[ "favourite_notify_email"]): + return MessageContainer( + "No email set.", + "Please set an email in the preferences before using this functionality." 
+ ) + + else: + try: + Notifier.notify( + Prefs[ "favourite_notify_email"], + str(NAME), + "TEST", + "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBhIQERUQEBAWEA8QEBQQEBAXFA8QGBAQFRAVFRUQEhQYGyYeFxkjGRIUHy8sJCcpLCwsFR4xNTAqNSYrLCkBCQoKBQUFDQUFDSkYEhgpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKSkpKf/AABEIAMIBAwMBIgACEQEDEQH/xAAbAAEAAwEBAQEAAAAAAAAAAAAABQYHBAMCAf/EAEgQAAIBAQIGDQoDBwMFAAAAAAABAgMEEQUWITFRkwYSMjRBU1RhcXSR0dIHEyJSgaGxs8HDQnKyIzOCkqLh8BVicxRDY8Li/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AKMAa1gLAVmlZqEpWalKUqFJyk6VNtt04tttrKwMlBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7hi9ZeS0dVS7gMXBtGL1l5LR1VLuGL1l5LR1VLuAxcG0YvWXktHVUu4YvWXktHVUu4DFwbRi9ZeS0dVS7ig+UOxU6VenGlTjTi6F7UIxgm/OTV7SWe5ICqgAAbRsd3pZ+r0vlxMXNo2O70s/V6Xy4gSAAAAAAAAAAAAHJbcKQpZG75eqs/t0AdZ51rTGG7ko9LSK5asOVJ5E9pHQs/tkcDd+V5XpAstTD1JZm5dCf1uOeWyWPBTb6Wl3kFGLeZX+894YOqvNTl2NfECUxl/wDF/V/8npDZJDhhJfysiv8ASq3Fy93eeVSxVI56cl/CwLHSw1Rl+Pa/mTXvzHbCaavTTWlNMpJ90q8oO+MnF8zuAugICybIZLJUW2XrLI+zMyas9qjUV8JXr4dK4APUAAAAAAAAAADOPKdvil1f7szRzOPKdvil1f7swKcAABtGx3eln6vS+XExc2jY7vSz9XpfLiBIAAAAAAAAH43dleRLO9AbuyvIlnegreFcKuo9rHJTX9T0vmA98I4dbvjSyLhnwv8ALoIZs+6NFzajFXyeZFiwdgWNP0p3Tn7o9GkCJseBqlTLuI6XwrmRMWfAdKGdbd6Xm7CQAHzCmo5IpRWhJL4H0AAAAHlWssJ7qCl0pfEjbTseg8tN7V6HlXeiXAFPtVhnSd043LgedP2nxRryg9tFuL/zI9JcpwUlc1ennTy3kFhLAe1vnSyrO4cK/LpA7cG4YVX0ZejU90ujn5iRKQmWLA+Ftv8As5v0/wAL9ZaOkCVAAAAAAAAM48p2+KXV/uzNHM48p2+KXV/uzApwAAG0bHd6Wfq9L5cTFzaNju9LP1el8uIEgAAAAAAHLhO2eapuS3TyR6dPszgRmHcI3vzUXkW7el+qRFGk5yUYq9vIkfDenhzlkwLg/wA3HbyXpzX8seBAdGDsHRox0ze6lp5lzHWAAAAAA+J1oxzyS6WkB9g8P+up8ZH+ZHrCrGW5kpdDTA+gAAAAENhjBN99SmsueUdP+5c5BJ3ZVkaypl2K5hvB+0lt4r0JPKvVlo6GBK4Kwh52GXdxyS59EjuKhYbW6U1NZs0lpi86LdG
SaTWVNXp6UB+gAAAABnHlO3xS6v8AdmaOZx5Tt8Uur/dmBTgAANo2O70s/V6Xy4mLm0bHd6Wfq9L5cQJAAAAAAK1h21baptVmhk/i4e72FirVdrFyeaKb7EUycm2287d76WB24HsfnKiv3MfSlz6F2lpI3ANn2tLbcM3f7FkX17SSAAAAfNSVyb0JvsR9HxW3Mvyy+DAqlbCVWe6m7tC9FdiOY97JYJ1dxG9cMsyXtJSlsb9ap7Er/ewIQJ3ZVkenMTdXY3k9Cpe9DV3vRD1aTg3GSuadzQEjYcOThknfOGn8S6HwlhhNSSad6avT0opRYNjlduEoP8LTXRK/J2r3gS4AAHlarOqkHB5pLsfA+09QBSqlNxbi8ji7n0osGx+1baDg88Hk/K83vvOHZBZ9rUU1mmv6lkfuuPLAlfa1lolfF+3N70gLQAAAAAGceU7fFLq/3ZmjmceU7fFLq/3ZgU4AADaNju9LP1el8uJi5tGx3eln6vS+XECQAAAAAcGHKl1GX+5qPvv+hV7iw7I5fs4rTP4RfeQljjfUgtM4/qQFuo09rFRX4YpdiPsAAAAAAAJAAAVnDzXnnd6sb+m7uuJW34ZhTV0Wpz0LKl0srdSo5Nybvbd7fOB8k7sahknLgbil7L39UQtKk5NRir5PMi2WCyKlBQ4c7emTzsDoAAAAARmyClfSv9WSfseT6ortOe1aks6afY7y1YVjfRn+W/saf0KmBd078unKDxscr6cHphH9KPYAAABnHlO3xS6v92Zo5nHlO3xS6v8AdmBTgAANo2O70s/V6Xy4mLm0bHd6Wfq9L5cQJAAAAABD7JF6EPzP9JEYPf7Wn/yR+KJvZFC+knomvemu4r9Ge1lF6JJ9jAugAAAAAAABWsN2iXnZR2z2quuje7tyuAspVsN/vpez9KA4TvsOB51UpZIwf4nlvy3ZEcF5asC/uIfxfrkB6WLB8KS9FXt55PO+5HSAAAAAAAc2En+xqfkl8Colqw1O6jLnuj2yX9yqgXCwL9lD/jj+lHufFCF0YrRFLsR9gAAAM48p2+KXV/uzNHM48p2+KXV/uzApwAAG0bHd6Wfq9L5cTFzaNju9LP1el8uIEgAAAAA5cKUdtSmuHa3rpWX6FSLuU62Wfzc5Q0PJ0cD7ALTg+tt6UZcO1ufSsj+B0EJsctW6pv8ANH6r4E2AAAAAADjq2uhe1KUNsnc71e71k0HYVDCH72p/yS/UwLB/1dm00+z+x1WatCS/ZtOKd2TMnnu95TSxbHP3b/O/0oCVAAAAAAABDbJK3oxhpbk+hZF8fcQ9io7epGOmSv6L8vuPbCtq85Vk1uV6MehcPbedWx2z3zc+CKuXS/7X9oFhAAAAADOPKdvil1f7szRzOPKdvil1f7swKcAABtGx3eln6vS+XExc2jY7vSz9XpfLiBIAAAAABCbIrJmqrg9GX0f0Js+atJSi4yyqSuYFPs1d05Kazxd/SuFFvoVlOKlHKpK9dxUrZZXSm4PgzPSuBnZgbCXm3tJP0JPP6stPQBZAAAAAA4qmBqUm5OLvk236TztnaAI//QqPqv8AmZ1WWyRpLawVybvzt5f8R7AAAAAAAHBhm2+bhct3PIuZcLOu0WiNOLlJ3Je/QlzlTtlrdWbm/YtC4EB4XFtwZZPNU1H8T9KXS/8ALvYQ+ArBt5eckvRg8nPL+xYgAAAAAAZx5Tt8Uur/AHZmjmceU7fFLq/3ZgU4AADaNju9LP1el8uJi5tGx3eln6vS+XECQAAAAAAABxYUweq0cmScdy//AFfMVecHFtNXNZGtBdSPwpgpVVto5Ki4fW5mBwYJwxtbqdR+jmjL1eZ8xPplKqUnFuMlc1nR2WDC06WTdQ9V8H5XwAWkHNZMIQqr0Xl4YvI17OE6QAAAAAAAAB5Wm0xpx203cvi9C5zjtuG4U8kfTlzZl0vuK/arXKo9tN36FwLmSA9cI4RlWle8kVuY6Od8582CxSqy2qzZ5S0LvFh
sEqsro5Et1LgX9y0WSyRpR2sV0vhb0sD7o0VCKjFXRSuR9gAAAAAAAzjynb4pdX+7M0czjynb4pdX+7MCnAAAbRsd3pZ+r0vlxMXNo2O70s/V6Xy4gSAAAAAAAAAAA5rbg+FVXSWVZpLOu9FdtuCp0srW2j6y+q4C1gCkJnfZ8N1YZG9utEsvvzkza8C055btpLTHJ2rMRdfY/UjuWpr+V9j7wOulski91BroakvfcdMMOUX+JrpjIrtWx1I7qEl7H8TxAtX+s0eMXZPuPOeH6KzNy6I95Wbz9UW8yv8AeBNVtknqU/bJ/Rd5G2nCNSpupZPVWRdizn7SwZVlmpvpfo/E77Pscf8A3Jpc0cr7WBDJaCWsOAZS9Kp6MfV4X06CZstghS3Ebn6zyt+06APilRUFtYq5LMkfYAAAAAAAAAAzjynb4pdX+7M0czjynb4pdX+7MCnAAAbRsd3pZ+r0vlxMXJ2zbNrZThGnCpFQhFQivN0ndGKuSvay5EBrYMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqwMpx+tvGx1VHwjH628bHVUfCBqx+OCedJ9KTMqx+tvGx1VHwjH628bHVUfCBqfmI+rHsifSV2bJ7jKsfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasDKcfrbxsdVR8Ix+tvGx1VHwgasZx5Tt8Uur/dmcGP1t42Oqo+Ei8LYZq2qSnWkpSjHaJqMYeje3ddFaWwOEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH//Z" + ) + + return MessageContainer( + "A test email has been sent to: " + Prefs[ "favourite_notify_email"], + "This will most probably fail to arrive in your inbox.\nCheck the wiki for troubleshooting information" + ) + + except Exception, ex: + return MessageContainer( + "An error occurred whilst trying to send the test email", + "The system encountered the following error: " + str(ex) + ) + + #################################################################################################### # Menu users seen when they select Update in main menu. 
@@ -245,6 +284,8 @@ def UpdateMenu(): def TypeMenu(type=None, genre=None, path=[], parent_name=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + type_desc = "Movies" if (type == "tv"): type_desc = "TV Shows" @@ -358,6 +399,8 @@ def TypeMenu(type=None, genre=None, path=[], parent_name=None): def AZListMenu(type=None, genre=None, path=None, parent_name=None, thumb=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + oc = ObjectContainer(view_group="InfoList", title1=parent_name, title2="A-Z") azList = ['123','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'] @@ -388,6 +431,8 @@ def AZListMenu(type=None, genre=None, path=None, parent_name=None, thumb=None): def GenreMenu(type=None, path=None, parent_name=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + oc = ObjectContainer(no_cache=True, title1=parent_name,title2="Genre", view_group="InfoList") for genre in Site.GetGenres(): @@ -424,6 +469,8 @@ def ItemsMenu( section_name="", start_page=0, path=[], parent_name=None ): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + num_pages = 2 replace_parent = False title2 = section_name @@ -521,6 +568,8 @@ def ItemsMenu( #################################################################################################### def TVSeasonMenu(mediainfo=None, url=None, item_name=None, path=[], parent_name=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + # Clean up mediainfo that's been passed in from favourites as it will be # keyed for a specifc ep and not a show. 
mediainfo.season = None @@ -665,6 +714,8 @@ def TVSeasonActionWatch(item_name=None, mediainfo=None, path=None, action="watch #################################################################################################### def TVSeasonEpsMenu(mediainfo=None, season_url=None,item_name=None, path=[], parent_name=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + # Clean up media info that's been passed in from favourites / recently watched. mediainfo.ep_num = None @@ -833,6 +884,8 @@ def TVSeasonEpsActionWatch(item_name=None, items=None, action="watch"): #################################################################################################### def SourcesMenu(mediainfo=None, url=None, item_name=None, path=[], parent_name=None, external_caller=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + if (item_name is None): item_name = mediainfo.title @@ -921,12 +974,18 @@ def SourcesAdditionalMenu(mediainfo): url = "http://localhost:32400/video/" + Site.ADDITIONAL_SOURCES[0] + "/sources/" + mediainfo.id if (mediainfo.type == 'movies'): - url += "/" + mediainfo.title + url += "/" + urllib.quote(mediainfo.title) else: - url += "/" + mediainfo.show_name + "/" + str(mediainfo.season) + "/" + str(mediainfo.ep_num) + url += "/" + urllib.quote(mediainfo.show_name) + "/" + str(mediainfo.season) + "/" + str(mediainfo.ep_num) #Log(url) - return Redirect(url) + + # Can't use Redirect as it doesn't seem to be supported by some clients + # So get the data for them instead by manually doing the redirect ourselves. 
+ request = urllib2.Request(url) + request.add_header('Referer', "http://localhost:32400" + VIDEO_PREFIX + "/") + return urllib2.urlopen(request).read() + #################################################################################################### @@ -1034,6 +1093,8 @@ def SourcesActionWatch(item_name=None, items=None, action="watch"): def SearchResultsMenu(query, type, parent_name=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + oc = ObjectContainer(no_cache=True, view_group = "InfoList", title1=parent_name, title2="Search (" + query + ")") path = [ { 'elem':'Search (' + query + ')', 'query': query }] @@ -1043,7 +1104,7 @@ def SearchResultsMenu(query, type, parent_name=None): func_name = SourcesMenu for item in Parsing.GetSearchResults(query=query, type=type): - title = item.title + " (" + item.releasedate + ")" if item.releasedate else item.title + title = item.title + " (" + str(item.year) + ")" if item.year else item.title oc.add( DirectoryObject( key=Callback(func_name, mediainfo=item, url=item.id, path=path, parent_name=oc.title2), @@ -1067,6 +1128,8 @@ def SearchResultsMenu(query, type, parent_name=None): #################################################################################################### def HistoryMenu(parent_name=None): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + oc = ObjectContainer(no_cache=True, view_group="InfoList", title1=parent_name, title2=L("HistoryTitle")) history = load_watched_items().get_recent(Prefs['watched_grouping'], int(Prefs['watched_amount'])) @@ -1328,7 +1391,11 @@ def HistoryRemoveFromRecent(mediainfo, path, parent_name): hist = load_watched_items() hist.remove_from_recent(mediainfo, Prefs['watched_grouping']) - save_watched_items(hist) + save_watched_items(hist) + + oc = HistoryMenu(parent_name=parent_name) + oc.replace_parent = True + return oc #################################################################################################### @@ -1359,6 +1426,8 @@ def 
HistoryAddToFavouritesMenu(mediainfo, path, parent_name): #################################################################################################### def FavouritesMenu(parent_name=None,label=None, new_items_only=None, replace_parent=False): + Dict[LAST_USAGE_TIME_KEY] = datetime.utcnow() + oc = ObjectContainer( no_cache=True, view_group="InfoList", replace_parent=replace_parent, title1=parent_name, title2=L("FavouritesTitle") @@ -1732,12 +1801,15 @@ def FavouritesRemoveItemMenu(mediainfo): def FavouritesNotifyMenu(mediainfo=None): oc = ObjectContainer(title1="", title2="") + oc.header = "New Item Notification" - # Load up favourites and get reference to stored favourite rather than - # dissociated favourite that's been passed in. + cron_op = None Thread.AcquireLock(FAVOURITE_ITEMS_KEY) + try: + # Load up favourites and get reference to stored favourite rather than + # dissociated favourite that's been passed in. favs = load_favourite_items() fav = favs.get(mediainfo=mediainfo)[0] @@ -1751,6 +1823,13 @@ def FavouritesNotifyMenu(mediainfo=None): fav.items = None fav.date_last_item_check = None oc.message = "Plugin will no longer check for new items." + + # If no other favourites are getting checked, remove cron. + if ( + Prefs['favourite_notify_email'] and + len([x for x in favs.get() if x.new_item_check]) == 0 + ): + cron_op = 'del' else: @@ -1762,18 +1841,32 @@ def FavouritesNotifyMenu(mediainfo=None): url = [v for k,v in fav.path[-1].items() if (k == 'show_url' or k == 'season_url')][0] # Get URLs of all the shows for the current favourite. - fav.items = [show['ep_url'] for show in Parsing.GetTVSeasonEps(url)] + fav.items = [show['ep_url'] for show in Parsing.GetTVSeasonEps(url)][:-1] fav.date_last_item_check = datetime.utcnow() oc.message = "Plugin will check for new items and notify you when one is available.\nNote that this may slow down the plugin at startup." 
+ # If we're the first favourite and user has chosen email notifications, + # add cron / scheduled task. + if ( + Prefs['favourite_notify_email'] and + len([x for x in favs.get() if x.new_item_check]) == 1 + ): + cron_op = 'add' + save_favourite_items(favs) finally: Thread.ReleaseLock(FAVOURITE_ITEMS_KEY) - oc.header = "New Item Notification" - + # Do this here to + # a) minimise risk of someting going wrong in favs manipulation and + # b) minimise lock length. + if (cron_op == 'add'): + Utils.add_favourites_cron(Platform.OS, NAME, VIDEO_PREFIX) + elif (cron_op == 'del'): + Utils.del_favourites_cron(Platform.OS, NAME, VIDEO_PREFIX) + return oc @@ -1788,6 +1881,8 @@ def NoOpMenu(): @route(VIDEO_PREFIX + '/favourites/check') def StartFavouritesCheck(): + FileLog.FileLog("","") + FileLog.FileLog("StartFavouritesCheck","") CheckForNewItemsInFavourites() return "" @@ -1809,6 +1904,9 @@ def CheckForNewItemsInFavourite(favourite, force=False): #Log("Processing favourite: " + str(favourite.mediainfo)) #Log("Favourite Last Checktime: " + str(favourite.date_last_item_check)) + FileLog.FileLog("CheckForNewItemsInFavourite","Processing favourite: " + str(favourite.mediainfo)) + FileLog.FileLog("CheckForNewItemsInFavourite","Favourite Last Checktime: " + str(favourite.date_last_item_check)) + # Do we want to check this favourite for updates? # If so, only bother if it's not already marked as having updates. # and hasn't been checked in the last 12 hours. 
@@ -1816,12 +1914,13 @@ def CheckForNewItemsInFavourite(favourite, force=False): force or ( favourite.new_item_check and - not favourite.new_item and - (datetime.utcnow() - favourite.date_last_item_check) > timedelta(hours=12) + not favourite.new_item + #and (datetime.utcnow() - favourite.date_last_item_check) > timedelta(hours=12) ) ): #Log("Checking for new item in favourite") + FileLog.FileLog("CheckForNewItemsInFavourite","Checking for new item in favourite") # Get page URL url = [v for k,v in favourite.path[-1].items() if (k == 'show_url' or k == 'season_url')][0] @@ -1837,7 +1936,8 @@ def CheckForNewItemsInFavourite(favourite, force=False): items_set = set(items) new_items = items_set.difference(set(favourite.items)) #Log("Found new items: " + str(new_items)) - + FileLog.FileLog("CheckForNewItemsInFavourite", "Found new items: " + str(new_items)) + # Items list is different. # Because we may be taking a while to do this # processing (we're relying on making a whole lot of HTTP requests to get @@ -1865,6 +1965,7 @@ def CheckForNewItemsInFavourite(favourite, force=False): try: if (len(new_items) > 0 and Prefs['favourite_notify_email'] and not force): Log('Notifying about new item for title: ' + favourite.mediainfo.title) + FileLog.FileLog("CheckForNewItemsInFavourite", 'Notifying about new item for title: ' + favourite.mediainfo.title) Notifier.notify( Prefs['favourite_notify_email'], str(NAME), @@ -1874,6 +1975,8 @@ def CheckForNewItemsInFavourite(favourite, force=False): except Exception, ex: Log("ERROR Whilst sending email notification about " + favourite.mediainfo.title) Log(str(ex)) + FileLog.FileLog("CheckForNewItemsInFavourite","ERROR Whilst sending email notification about " + favourite.mediainfo.title) + FileLog.FileLog("CheckForNewItemsInFavourite",str(ex)) pass @@ -2002,7 +2105,7 @@ def GetAdditionalSources(imdb_id, title, year=None, season_num=None, ep_num=None season=season_num, ep_num=ep_num ), - need_meta_retrieve(mediainfo.type) + 
need_meta_retrieve(type) ) oc = SourcesMenu(mediainfo, ep[0]['ep_url'], external_caller=caller) @@ -2063,7 +2166,7 @@ def PlaybackCaller(url): # See if the URL being played is on our recently browsed list. caller = cerealizer.loads(Data.Load(BROWSED_ITEMS_KEY)).getCaller(decoded_url) if (not caller): - caller = VIDEO_PREFIX + caller = None #Log(caller) @@ -2153,12 +2256,15 @@ def PlaybackStarted(id, season_num=None, ep_num=None): def VersionTrack(): - try: - request = urllib2.Request(VERSION_URLS[VERSION]) - request.add_header('User-agent', '-') - response = urllib2.urlopen(request) - except: - pass + if (Prefs['versiontracking']): + try: + # Has there been a 3 hour idle window since we were last called? + # If so, assume this is a new user session and count it towards stats. + request = urllib2.Request(VERSION_URLS[VERSION]) + request.add_header('User-agent', '-') + response = urllib2.urlopen(request) + except: + pass #################################################################################################### def CheckAdditionalSources(sources): diff --git a/Contents/DefaultPrefs.json b/Contents/DefaultPrefs.json index f179f04..fa34c3d 100644 --- a/Contents/DefaultPrefs.json +++ b/Contents/DefaultPrefs.json @@ -36,7 +36,7 @@ { "id": "favourite_notify_email_test", "type": "bool", - "label": "Send Test Email", + "label": "Show 'Send Test Email'", "default": "false" }, { diff --git a/Contents/Libraries/Shared/FileLog.py b/Contents/Libraries/Shared/FileLog.py new file mode 100644 index 0000000..230215d --- /dev/null +++ b/Contents/Libraries/Shared/FileLog.py @@ -0,0 +1 @@ +import sys import os from datetime import datetime def FileLog(fn_name, msg): message = str(datetime.now()) + " - [" + str(fn_name) + "]: " + msg + "\n" f = None try: f = open(sys.path[0] + os.sep + "PLEX_LMWT_Favs.log", 'a') f.write(message) finally: if (f): f.close() \ No newline at end of file diff --git a/Contents/Services/URL/FileNuke/ServiceCode.pys 
b/Contents/Services/URL/FileNuke/ServiceCode.pys index 82cf9ac..b34ec6a 100644 --- a/Contents/Services/URL/FileNuke/ServiceCode.pys +++ b/Contents/Services/URL/FileNuke/ServiceCode.pys @@ -1 +1 @@ -import re, string, urllib2 from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In FileNuke normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(filenuke)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for FileNuke (' + url + ')') return VideoClipObject( title = 'FileNuke Redirect Page', summary = 'FileNuke Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for FileNuke (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): HTTP.Headers['User-Agent'] = USER_AGENT # Request Initial Provider page. try: #Log('Requesting ' + url) soup = BeautifulSoup(HTTP.Request(url).content) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements if present... try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: return LogProviderError("Error whilst extracting out form elemnts to navigate to 2nd page.",ex) # Navigate to 2nd page. 
try: contents = HTTP.Request(url, values=params, headers={ 'Referer': url }, cacheTime=0).content soup = BeautifulSoup(contents) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) # Extract out JS packed final video URL. script_elems = soup.find('div', { 'id': 'player_code' }).findAll('script') elems = None video_url = None for script_elem in script_elems: script = script_elem.string if script is None: continue #Log(script) # Look for substitution values. sub_vals = re.search("\d{2},'([^']*)'.split", script) if (sub_vals is None): continue elems = sub_vals.group(1).split('|') #Log(elems) # Look for url to substitute values into. url_re = re.search("([0-9a-z]://[0-9a-z]\.[0-9a-z]\.[0-9a-z]\.[0-9a-z]/[0-9a-z]/[0-9a-z]/[0-9a-z]\.[0-9a-z])", script) #Log(url_re.group(1)) if (url_re is None or url_re.group(1) is None): continue video_url = url_re.group(1) if (elems is None or video_url is None): return LogProviderError("Error whilst extracting out / depacking video URL elements", None) # Create dict to map url sub keys to sub values. alphadict = dict() for index_cnt in range(0, 2): index = index_cnt * len(string.digits + string.ascii_lowercase) strindex = str(index_cnt) if index_cnt > 0 else "" for cnt in range(0, len(string.digits + string.ascii_lowercase)): alphadict[strindex + (string.digits + string.ascii_lowercase)[cnt]] = cnt + index def SubElem(matchObj): val = elems[alphadict[matchObj.group(0)]] if (val == ""): val = matchObj.group(0) return val # Sub values into url to get final url. final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. 
oc.user_agent = USER_AGENT return oc def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] \ No newline at end of file +import re, string, urllib2 from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In FileNuke normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(filenuke)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for FileNuke (' + url + ')') return VideoClipObject( title = 'FileNuke Redirect Page', summary = 'FileNuke Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for FileNuke (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): HTTP.Headers['User-Agent'] = USER_AGENT # Request Initial Provider page. try: #Log('Requesting ' + url) soup = BeautifulSoup(HTTP.Request(url).content) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements if present... try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: return LogProviderError("Error whilst extracting out form elemnts to navigate to 2nd page.",ex) # Navigate to 2nd page. 
try: contents = HTTP.Request(url, values=params, headers={ 'Referer': url }, cacheTime=0).content soup = BeautifulSoup(contents) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) # Extract out JS packed final video URL. script_elems = soup.find('div', { 'id': 'player_code' }).findAll('script') elems = None video_url = None for script_elem in script_elems: script = script_elem.string if script is None: continue #Log(script) # Look for substitution values. sub_vals = re.search("\d{2},'([^']*)'.split", script) if (sub_vals is None): continue elems = sub_vals.group(1).split('|') #Log(elems) # Look for url to substitute values into. url_re = re.search("([0-9a-z]://[0-9a-z]\.[0-9a-z]\.[0-9a-z]\.[0-9a-z]/[0-9a-z]/[0-9a-z]/[0-9a-z]\.[0-9a-z])", script) #Log(url_re.group(1)) if (url_re is None or url_re.group(1) is None): continue video_url = url_re.group(1) if (elems is None or video_url is None): return LogProviderError("Error whilst extracting out / depacking video URL elements", None) # Create dict to map url sub keys to sub values. alphadict = dict() for index_cnt in range(0, 2): index = index_cnt * len(string.digits + string.ascii_lowercase) strindex = str(index_cnt) if index_cnt > 0 else "" for cnt in range(0, len(string.digits + string.ascii_lowercase)): alphadict[strindex + (string.digits + string.ascii_lowercase)[cnt]] = cnt + index def SubElem(matchObj): val = elems[alphadict[matchObj.group(0)]] if (val == ""): val = matchObj.group(0) return val # Sub values into url to get final url. final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. 
oc.user_agent = USER_AGENT return oc def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) raise Exception(msg) return [] \ No newline at end of file diff --git a/Contents/Services/URL/LMWT/ServiceCode.pys b/Contents/Services/URL/LMWT/ServiceCode.pys index 24a9d2b..7546975 100644 --- a/Contents/Services/URL/LMWT/ServiceCode.pys +++ b/Contents/Services/URL/LMWT/ServiceCode.pys @@ -1 +1 @@ -import re, urlparse, cgi, base64, urllib2 USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' PLEX_URL = "http://127.0.0.1:32400" PLUGIN_URL = PLEX_URL + "/video/lmwt" def NormalizeURL(url): return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for LMWT (' + url + ')') video = None # Plugin should have access to info about this URL if user used plugin to launch video. # Bad things can happen here. Still want to run rest of code if possible though... try: request = urllib2.Request(PLUGIN_URL + "/mediainfo/%s" % String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) #Log(mediainfo) video = VideoClipObject( title=mediainfo['title'], summary=mediainfo['summary'], art=mediainfo['background'], thumb= mediainfo['poster'], rating = float(mediainfo['rating']), duration=mediainfo['duration'], year=mediainfo['year'], originally_available_at= ( date.fromordinal(mediainfo['release_date']) if ('release_date' in mediainfo and mediainfo['release_date']) else None ), genres=mediainfo['genres'], ) except Exception, ex: Log(ex) #Log(video) if video is None: # Return bare minimum. This is never shown to users. 
video = VideoClipObject( title = 'LMWT Redirect Page', summary = 'LMWT Redirect Page', thumb = None, ) return video def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for LMWT') ret = [] ret.append( MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ) return ret @indirect def PlayVideo(url): # Extract out and break down query string of the LMWT Provider URL... lmwt_qs_args = cgi.parse_qs(urlparse.urlparse(url).query) # Extract out provider URL provider_url = base64.b64decode(lmwt_qs_args['url'][0]) media_objects = URLService.MediaObjectsForURL(provider_url) if (len(media_objects) > 0): PlaybackStarted(url=url) return ObjectContainer( objects = [ VideoClipObject( items = media_objects ) ] ) def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] ################################################################################################## # LMWT Plugin specific helper methods. def PlaybackStarted(url): # Bad things can happen here. Let's try to be neat though.... try: caller = "lmwt" # We may be playing the video on behalf of another plugin. In that case, we'll need to # call that plugin's PlaybackStarted method insted of our own. # # Check if this is the case by seeing who originally called for the source listing. try: request = urllib2.Request( PLUGIN_URL + "/playback/caller/" + String.Encode(url) ) caller = JSON.ObjectFromString(urllib2.urlopen(request).read())['caller'] except Exception, ex: pass # Get the media info object that was built by this plugin when generating the # source listing. We'll use the info in that to talk to whatever plugin we need to # tell the item has started playing. request = urllib2.Request(PLUGIN_URL + "/mediainfo/" + String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) # Use the information from the mediainfo to call the PlaybackStarted method of # whatever plugin requested this. 
url = PLEX_URL + '/video/' + caller + "/playback/%s" % mediainfo['id'] if (mediainfo['ep_num']): url += "/" + str(mediainfo['season']) + "/" + str(mediainfo['ep_num']) Log(url) request = urllib2.Request(url) response = urllib2.urlopen(request) except Exception, ex: Log.Exception("Error whilst trying to mark item as played") pass \ No newline at end of file +import re import urlparse import cgi import base64 import urllib2 from datetime import date USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' PLEX_URL = "http://127.0.0.1:32400" PLUGIN_URL = PLEX_URL + "/video/lmwt" def NormalizeURL(url): return url def MetadataObjectForURL(url): Log('In MetadataObjectForURL for LMWT (' + url + ')') video = None # Plugin should have access to info about this URL if user used plugin to launch video. # Bad things can happen here. Still want to run rest of code if possible though... try: request = urllib2.Request(PLUGIN_URL + "/mediainfo/%s" % String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) #Log(mediainfo) video = VideoClipObject( title=mediainfo['title'], summary=mediainfo['summary'], art=mediainfo['background'], thumb= mediainfo['poster'], rating = float(mediainfo['rating']), duration=mediainfo['duration'], year=mediainfo['year'], originally_available_at= ( date.fromordinal(mediainfo['release_date']) if ('release_date' in mediainfo and mediainfo['release_date']) else None ), genres=mediainfo['genres'], ) except Exception, ex: Log(ex) #Log(video) if video is None: # Return bare minimum. This is never shown to users. 
video = VideoClipObject( title = 'LMWT Redirect Page', summary = 'LMWT Redirect Page', thumb = None, ) return video def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for LMWT') ret = [] ret.append( MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ) return ret @indirect def PlayVideo(url): # Extract out and break down query string of the LMWT Provider URL... lmwt_qs_args = cgi.parse_qs(urlparse.urlparse(url).query) # Extract out provider URL provider_url = base64.b64decode(lmwt_qs_args['url'][0]) media_objects = URLService.MediaObjectsForURL(provider_url) if (len(media_objects) > 0): PlaybackStarted(url=url) return ObjectContainer( objects = [ VideoClipObject( items = media_objects ) ] ) ################################################################################################## # LMWT Plugin specific helper methods. def PlaybackStarted(url): # Bad things can happen here. Let's try to be neat though.... try: caller = "lmwt" # We may be playing the video on behalf of another plugin. In that case, we'll need to # call that plugin's PlaybackStarted method insted of our own. # # Check if this is the case by seeing who originally called for the source listing. try: request = urllib2.Request( PLUGIN_URL + "/playback/caller/" + String.Encode(url) ) response = JSON.ObjectFromString(urllib2.urlopen(request).read()) if (response['caller']): caller = response['caller'] except Exception, ex: pass # Get the media info object that was built by this plugin when generating the # source listing. We'll use the info in that to talk to whatever plugin we need to # tell the item has started playing. request = urllib2.Request(PLUGIN_URL + "/mediainfo/" + String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) # Use the information from the mediainfo to call the PlaybackStarted method of # whatever plugin requested this. 
url = PLEX_URL + '/video/' + caller + "/playback/%s" % mediainfo['id'] if (mediainfo['ep_num']): url += "/" + str(mediainfo['season']) + "/" + str(mediainfo['ep_num']) Log(url) request = urllib2.Request(url) response = urllib2.urlopen(request) except Exception, ex: Log.Exception("Error whilst trying to mark item as played") pass \ No newline at end of file diff --git a/Contents/Services/URL/MovPod/ServiceCode.pys b/Contents/Services/URL/MovPod/ServiceCode.pys index d2b62ac..f505a5d 100644 --- a/Contents/Services/URL/MovPod/ServiceCode.pys +++ b/Contents/Services/URL/MovPod/ServiceCode.pys @@ -1 +1 @@ -import re, urlparse, cgi, urllib2 from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In MovPod / DaClips / GorillaVid normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(movpod|daclips|gorillavid)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for MovPod / DaClips (' + url + ')') return VideoClipObject( title = 'MovPod / DaClips / GorillaVid Redirect Page', summary = 'MovPod / DaClips / GorillaVid Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for MovPod / DaClips (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): # Request movpod page. 
try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = urllib2.urlopen(request) # Collect final url and read in MovPod page. provider_url = response.geturl() if "404" in provider_url: return LogProviderError('Video no longer available (404 Returned)') #Log(provider_url) soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements if present... elemFound = False try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal elemFound = True except Exception, ex: # This isn't necessarily the end of the world. Could be one of those pages where the # video just happens to be on the 1st page. pass # If any of the form elements were found, then the page was a waiting # page and we need to request the actual video page. if (elemFound): try: #Log(params) HTTP.Headers['User-agent'] = USER_AGENT headers = { 'Referer': provider_url } soup = BeautifulSoup(HTTP.Request(provider_url,values=params,headers=headers).content) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + provider_url + ")", ex) # Extract out video url. content = str(soup.contents) file = re.search('file:\"(.*?)\"', content) if (file is None): msg = 'Video URL not found on page\n' # Look for error message on page. 
err = soup.find('b', "msg_ok") if (err != None): msg = " Provider message: " msg = msg + "".join(err.findAll(text=True)) return LogProviderError(msg) final_url = file.group(1) Log(final_url) return ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] \ No newline at end of file +import re, urlparse, cgi, urllib2 from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In MovPod / DaClips / GorillaVid normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(movpod|daclips|gorillavid)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for MovPod / DaClips (' + url + ')') return VideoClipObject( title = 'MovPod / DaClips / GorillaVid Redirect Page', summary = 'MovPod / DaClips / GorillaVid Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for MovPod / DaClips (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): # Request movpod page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = urllib2.urlopen(request) # Collect final url and read in MovPod page. 
provider_url = response.geturl() if "404" in provider_url: return LogProviderError('Video no longer available (404 Returned)') #Log(provider_url) soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements if present... elemFound = False try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal elemFound = True except Exception, ex: # This isn't necessarily the end of the world. Could be one of those pages where the # video just happens to be on the 1st page. pass # If any of the form elements were found, then the page was a waiting # page and we need to request the actual video page. if (elemFound): try: #Log(params) HTTP.Headers['User-agent'] = USER_AGENT headers = { 'Referer': provider_url } soup = BeautifulSoup(HTTP.Request(provider_url,values=params,headers=headers).content) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + provider_url + ")", ex) # Extract out video url. content = str(soup.contents) file = re.search('file:\"(.*?)\"', content) if (file is None): msg = 'Video URL not found on page\n' # Look for error message on page. 
err = soup.find('b', "msg_ok") if (err != None): msg = " Provider message: " msg = msg + "".join(err.findAll(text=True)) return LogProviderError(msg) final_url = file.group(1) Log(final_url) return ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) raise Exception(msg) return [] \ No newline at end of file diff --git a/Contents/Services/URL/MovShare/ServiceCode.pys b/Contents/Services/URL/MovShare/ServiceCode.pys index 9f2293e..339a071 100644 --- a/Contents/Services/URL/MovShare/ServiceCode.pys +++ b/Contents/Services/URL/MovShare/ServiceCode.pys @@ -1 +1 @@ -import re, urlparse, cgi, urllib, urllib2, cookielib, urlparse from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' API_URL = "http://%s/api/player.api.php?pass=undefined&file=%s&user=undefined&key=%s&codes=undefined" def NormalizeURL(url): #Log("*********** In MovShare normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. 
match = re.search("(movshare|novamov|nowvideo|divxstage|videoweed)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for MovShare (' + url + ')') return VideoClipObject( title = 'MovShare Redirect Page', summary = 'MovShare Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for MovShare (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): cj = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) # Request Initial Provider page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = opener.open(request) # Read in location and content of MovShare page. soup = BeautifulSoup(response.read()) provider_url = response.geturl() #Log(provider_url) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # See if we have a form to submit before video page... form = soup.find('form', { 'id' : 'watch' }) if (form is not None): # Submit the form to be taken to video page. try: # Get params to submit form with. params = {} for elem in form.findAll('input', {'type' : 'hidden' }): params[elem['name']] = elem['value'] #Log("Params: " + str(params)) #Log('Requesting ' + provider_url) # Post to form request = urllib2.Request(provider_url) request.add_header('User-agent', USER_AGENT) request.add_data(urllib.urlencode(params)) response = opener.open(request) soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst trying to navigate from initial provider page to video page (" + url + ")", ex) # Read in API Key info and file ID from video page. 
try: #Log(soup.contents) api_key = re.search("flashvars\.filekey=\"(.*)\"", str(soup.contents)).group(1) file_id = re.search("flashvars\.file=\"(.*)\"", str(soup.contents)).group(1) #Log("API KEY:" + api_key) #Log("File ID:" + file_id) except Exception, ex: return LogProviderError("Error whilst retrieving API Key and File ID. Provider may have changed page layout.", ex) # Get final video location from API. try: # Build up and retrieve API URL api_url = API_URL % ( urlparse.urlparse(provider_url).netloc, file_id, urllib.quote_plus(api_key) ) #Log('Requesting ' + api_url) request = urllib2.Request(api_url) request.add_header('User-agent', USER_AGENT) response = opener.open(request) content = response.read() #Log(content) # API should be HTML form encoded query string. Break it down to get elem we're # interested in. api_info = cgi.parse_qs(content) final_url = api_info['url'][0] except Exception, ex: return LogProviderError("Error whilst retrieving final url from API page.", ex) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. oc.user_agent = USER_AGENT return oc # Util methods def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] \ No newline at end of file +import re, urlparse, cgi, urllib, urllib2, cookielib, urlparse from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' API_URL = "http://%s/api/player.api.php?pass=undefined&file=%s&user=undefined&key=%s&codes=undefined" def NormalizeURL(url): #Log("*********** In MovShare normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. 
match = re.search("(movshare|novamov|nowvideo|divxstage|videoweed)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for MovShare (' + url + ')') return VideoClipObject( title = 'MovShare Redirect Page', summary = 'MovShare Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for MovShare (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): cj = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) # Request Initial Provider page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = opener.open(request) # Read in location and content of MovShare page. soup = BeautifulSoup(response.read()) provider_url = response.geturl() #Log(provider_url) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # See if we have a form to submit before video page... form = soup.find('form', { 'id' : 'watch' }) if (form is not None): # Submit the form to be taken to video page. try: # Get params to submit form with. params = {} for elem in form.findAll('input', {'type' : 'hidden' }): params[elem['name']] = elem['value'] #Log("Params: " + str(params)) #Log('Requesting ' + provider_url) # Post to form request = urllib2.Request(provider_url) request.add_header('User-agent', USER_AGENT) request.add_data(urllib.urlencode(params)) response = opener.open(request) soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst trying to navigate from initial provider page to video page (" + url + ")", ex) # Read in API Key info and file ID from video page. 
try: #Log(soup.contents) api_key = re.search("flashvars\.filekey=\"(.*)\"", str(soup.contents)).group(1) file_id = re.search("flashvars\.file=\"(.*)\"", str(soup.contents)).group(1) #Log("API KEY:" + api_key) #Log("File ID:" + file_id) except Exception, ex: return LogProviderError("Error whilst retrieving API Key and File ID. Provider may have changed page layout.", ex) # Get final video location from API. try: # Build up and retrieve API URL api_url = API_URL % ( urlparse.urlparse(provider_url).netloc, file_id, urllib.quote_plus(api_key) ) #Log('Requesting ' + api_url) request = urllib2.Request(api_url) request.add_header('User-agent', USER_AGENT) response = opener.open(request) content = response.read() #Log(content) # API should be HTML form encoded query string. Break it down to get elem we're # interested in. api_info = cgi.parse_qs(content) final_url = api_info['url'][0] except Exception, ex: return LogProviderError("Error whilst retrieving final url from API page.", ex) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. 
oc.user_agent = USER_AGENT return oc # Util methods def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) raise Exception(msg) return [] \ No newline at end of file diff --git a/Contents/Services/URL/PutLocker/ServiceCode.pys b/Contents/Services/URL/PutLocker/ServiceCode.pys index 9538c79..c5fd6cd 100644 --- a/Contents/Services/URL/PutLocker/ServiceCode.pys +++ b/Contents/Services/URL/PutLocker/ServiceCode.pys @@ -1 +1 @@ -import re, urlparse, cgi, urllib, urllib2, cookielib, urlparse from datetime import date from BeautifulSoup import BeautifulSoup from htmlentitydefs import name2codepoint as n2cp import sys USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In PutLocker / Sockshare normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(putlocker|sockshare)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for PutLocker / Sockshare (' + url + ')') return VideoClipObject( title = 'PutLocker / Sockshare Redirect Page', summary = 'PutLocker / Sockshare Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for PutLocker / Sockshare (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] return ret @indirect def PlayVideo(url): putlocker_host = urlparse.urlparse(url).netloc cj = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) # Request Provider page. 
try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = opener.open(request) # Read in location and content of PutLocker page. soup = BeautifulSoup(response.read()) provider_url = response.geturl() #Log(provider_url) if (provider_url.endswith('?404')): return LogProviderError('Video no longer available (404 Returned)') except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Read in form info... try: params = {} params['hash'] = soup.find('input', {'name' : 'hash' })['value'] params['confirm'] = "Continue as Free User" #Log(params) except Exception, ex: return LogProviderError("Error whilst retrieving information to go from intial page to next page", ex) # Submit form by re-requesting the same page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) request.add_data(urllib.urlencode(params)) response = opener.open(request) # Read in data from response. content = response.read() #Log(content) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) # Look for playlist URL. playlist_res = re.search("playlist: \'(.*?)\'", content) if (playlist_res is None): # Couldn't find playlist URL on page. # Usually, that activates the download link, so try that. soup = BeautifulSoup(content) download_link = soup.find('a','download_file_link') if download_link is None: # No playlist and no download link. Might as well give up. return LogProviderError('Playlist element not found on video page.') else: final_url = "http://" + putlocker_host + decode_htmlentities(download_link['href']) # Get final, final URL. 
try: #Log('Requesting ' + final_url) request = urllib2.Request(final_url) request.add_header('User-agent', USER_AGENT) request.add_header('Accept-Encoding','gzip, deflate') # Use an URL opener which doesn't follow 302s as otherwise we'll start # to download the video. opener_nofollow = urllib2.build_opener( urllib2.HTTPCookieProcessor(cj),NoRedirectHandler() ) response = opener_nofollow.open(request) final_url = response.headers['Location'] except Exception, ex: return LogProviderError("Error whilst retrieving playlist page (http://" + final_url + ")", ex) else: # We've got a playlist. Get the page and final url. playlist = playlist_res.group(1) #Log(playlist) # Fetch playlist try: #Log('Requesting ' + "http://" + putlocker_host + playlist) request = urllib2.Request("http://" + putlocker_host + playlist) request.add_header('User-agent', USER_AGENT) response = opener.open(request) final = response.read() #Log(final) except Exception, ex: return LogProviderError("Error whilst retrieving playlist page (http://" + putlocker_host + playlist + ")", ex) final_url_res = re.search(" # This will most likely break something at some point. if ( Client.Platform == ClientPlatform.MacOSX or Client.Platform == ClientPlatform.Linux or Client.Platform == ClientPlatform.Windows ): # Plex will send blank refere if this isn't set which is reject by provider final_url = final_url + "|Referer=" + url Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. oc.user_agent = USER_AGENT return oc # Utility methods. def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] # Replace encoded HTML entities with matching real character. 
def substitute_entity(match): ent = match.group(3) if match.group(1) == "#": if match.group(2) == '': return unichr(int(ent)) elif match.group(2) == 'x': return unichr(int('0x'+ent, 16)) else: cp = n2cp.get(ent) if cp: return unichr(cp) else: return match.group() # Replace encoded HTML entities with matching real character. def decode_htmlentities(string): entity_re = re.compile(r'&(#?)(x?)(\d{1,5}|\w{1,8});') return entity_re.subn(substitute_entity, string)[0] # urlib2 handler which doesn't automatically follow 302, but which instead returns the new # location. class NoRedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code infourl.code = code return infourl \ No newline at end of file +import re, urlparse, cgi, urllib, urllib2, cookielib, urlparse from datetime import date from BeautifulSoup import BeautifulSoup from htmlentitydefs import name2codepoint as n2cp import sys USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In PutLocker / Sockshare normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. 
match = re.search("(putlocker|sockshare)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for PutLocker / Sockshare (' + url + ')') return VideoClipObject( title = 'PutLocker / Sockshare Redirect Page', summary = 'PutLocker / Sockshare Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for PutLocker / Sockshare (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] return ret @indirect def PlayVideo(url): putlocker_host = urlparse.urlparse(url).netloc cj = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) # Request Provider page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = opener.open(request) # Read in location and content of PutLocker page. soup = BeautifulSoup(response.read()) provider_url = response.geturl() #Log(provider_url) if (provider_url.endswith('?404')): return LogProviderError('Video no longer available (404 Returned)') except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Read in form info... try: params = {} params['hash'] = soup.find('input', {'name' : 'hash' })['value'] params['confirm'] = "Continue as Free User" #Log(params) except Exception, ex: return LogProviderError("Error whilst retrieving information to go from intial page to next page", ex) # Submit form by re-requesting the same page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) request.add_data(urllib.urlencode(params)) response = opener.open(request) # Read in data from response. 
content = response.read() #Log(content) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) # Look for playlist URL. playlist_res = re.search("playlist: \'(.*?)\'", content) if (playlist_res is None): # Couldn't find playlist URL on page. # Usually, that activates the download link, so try that. soup = BeautifulSoup(content) download_link = soup.find('a','download_file_link') if download_link is None: # No playlist and no download link. Might as well give up. return LogProviderError('Playlist element not found on video page.') else: final_url = "http://" + putlocker_host + decode_htmlentities(download_link['href']) # Get final, final URL. try: #Log('Requesting ' + final_url) request = urllib2.Request(final_url) request.add_header('User-agent', USER_AGENT) request.add_header('Accept-Encoding','gzip, deflate') # Use an URL opener which doesn't follow 302s as otherwise we'll start # to download the video. opener_nofollow = urllib2.build_opener( urllib2.HTTPCookieProcessor(cj),NoRedirectHandler() ) response = opener_nofollow.open(request) final_url = response.headers['Location'] except Exception, ex: return LogProviderError("Error whilst retrieving playlist page (http://" + final_url + ")", ex) else: # We've got a playlist. Get the page and final url. playlist = playlist_res.group(1) #Log(playlist) # Fetch playlist try: #Log('Requesting ' + "http://" + putlocker_host + playlist) request = urllib2.Request("http://" + putlocker_host + playlist) request.add_header('User-agent', USER_AGENT) response = opener.open(request) final = response.read() #Log(final) except Exception, ex: return LogProviderError("Error whilst retrieving playlist page (http://" + putlocker_host + playlist + ")", ex) final_url_res = re.search(" # This will most likely break something at some point. 
if ( Client.Platform == ClientPlatform.MacOSX or Client.Platform == ClientPlatform.Linux or Client.Platform == ClientPlatform.Windows ): # Plex will send blank refere if this isn't set which is reject by provider final_url = final_url + "|Referer=" + url Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. oc.user_agent = USER_AGENT return oc # Utility methods. def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) raise Exception(msg) return [] # Replace encoded HTML entities with matching real character. def substitute_entity(match): ent = match.group(3) if match.group(1) == "#": if match.group(2) == '': return unichr(int(ent)) elif match.group(2) == 'x': return unichr(int('0x'+ent, 16)) else: cp = n2cp.get(ent) if cp: return unichr(cp) else: return match.group() # Replace encoded HTML entities with matching real character. def decode_htmlentities(string): entity_re = re.compile(r'&(#?)(x?)(\d{1,5}|\w{1,8});') return entity_re.subn(substitute_entity, string)[0] # urlib2 handler which doesn't automatically follow 302, but which instead returns the new # location. 
class NoRedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): infourl = urllib.addinfourl(fp, headers, req.get_full_url()) infourl.status = code infourl.code = code return infourl \ No newline at end of file diff --git a/Contents/Services/URL/VidBux/ServiceCode.pys b/Contents/Services/URL/VidBux/ServiceCode.pys index 2af9927..28b18d4 100644 --- a/Contents/Services/URL/VidBux/ServiceCode.pys +++ b/Contents/Services/URL/VidBux/ServiceCode.pys @@ -1 +1 @@ -import re, urlparse, cgi, urllib2, string from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In VidBux / VidxDen normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. match = re.search("(vidbux|vidxden)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for VidBux / VidxDen (' + url + ')') return VideoClipObject( title = 'VidBux / VidxDen Redirect Page', summary = 'VidBux / VidxDen Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MetadataObjectForURL for VidBux / VidxDen (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): # Request provider page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = urllib2.urlopen(request) # Collect final url and read in MovPod page. 
provider_url = response.geturl() if "404" in provider_url: return LogProviderError('Video no longer available (404 Returned)') soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements... try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: # Look for any errors that may have been on page and caused error. err = soup.find('b', { 'style':'color: #CC0000; font-size: medium;' }) if err is not None: return LogProviderError("Provider reachable but has returned following error: " + err.string) else: return LogProviderError("Error whilst retrieving information to go from intial page to next page", ex) # Submit form by re-requesting page with right params. try: #Log(params) #Log(provider_url) HTTP.Headers['User-agent'] = USER_AGENT headers = { 'Referer': url } content = HTTP.Request(provider_url, values=params, headers=headers).content soup = BeautifulSoup(content) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) #Log(content) # Extract out JS packed final video URL. script_elems = soup.find('div', { 'id': 'embedcontmvshre' }).findAll('script') elems = None video_url = None for script_elem in script_elems: script = script_elem.string if script is None: continue #Log(script) # Look for substitution values. sub_vals = re.search("\d{2},'([^']*)'.split", script) if (sub_vals is None): continue elems = sub_vals.group(1).split('|') #Log(elems) # Look for url to substitute values into. 
url_re = re.search("([0-9a-z]*://[0-9a-z]*\.[0-9a-z]*\.[0-9a-z]*\:[0-9a-z]*/([0-9a-z]*/)*[0-9a-z.\-_ ()]*)", script) #Log(url_re.group(1)) if (url_re is None or url_re.group(1) is None): continue video_url = url_re.group(1) if (elems is None or video_url is None): return LogProviderError("Error whilst extracting out / depacking video URL elements", None) # Create dict to map url sub keys to sub values. alphadict = dict() for index_cnt in range(0, 2): index = index_cnt * len(string.digits + string.ascii_lowercase) strindex = str(index_cnt) if index_cnt > 0 else "" for cnt in range(0, len(string.digits + string.ascii_lowercase)): alphadict[strindex + (string.digits + string.ascii_lowercase)[cnt]] = cnt + index def SubElem(matchObj): val = elems[alphadict[matchObj.group(0)]] if (val == ""): val = matchObj.group(0) return val # Sub values into url to get final url. final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url) Log(final_url) return ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] \ No newline at end of file +import re, urlparse, cgi, urllib2, string from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In VidBux / VidxDen normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. 
match = re.search("(vidbux|vidxden)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for VidBux / VidxDen (' + url + ')') return VideoClipObject( title = 'VidBux / VidxDen Redirect Page', summary = 'VidBux / VidxDen Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MetadataObjectForURL for VidBux / VidxDen (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): # Request provider page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = urllib2.urlopen(request) # Collect final url and read in MovPod page. provider_url = response.geturl() if "404" in provider_url: return LogProviderError('Video no longer available (404 Returned)') soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Extract out these form elements... try: formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: # Look for any errors that may have been on page and caused error. err = soup.find('b', { 'style':'color: #CC0000; font-size: medium;' }) if err is not None: return LogProviderError("Provider reachable but has returned following error: " + err.string) else: return LogProviderError("Error whilst retrieving information to go from intial page to next page", ex) # Submit form by re-requesting page with right params. 
try: #Log(params) #Log(provider_url) HTTP.Headers['User-agent'] = USER_AGENT headers = { 'Referer': url } content = HTTP.Request(provider_url, values=params, headers=headers).content soup = BeautifulSoup(content) except Exception, ex: return LogProviderError("Error whilst retrieving second provider page (" + url + ")", ex) #Log(content) # Extract out JS packed final video URL. script_elems = soup.find('div', { 'id': 'embedcontmvshre' }).findAll('script') elems = None video_url = None for script_elem in script_elems: script = script_elem.string if script is None: continue #Log(script) # Look for substitution values. sub_vals = re.search("\d{2},'([^']*)'.split", script) if (sub_vals is None): continue elems = sub_vals.group(1).split('|') #Log(elems) # Look for url to substitute values into. url_re = re.search("([0-9a-z]*://[0-9a-z]*\.[0-9a-z]*\.[0-9a-z]*\:[0-9a-z]*/([0-9a-z]*/)*[0-9a-z.\-_ ()]*)", script) #Log(url_re.group(1)) if (url_re is None or url_re.group(1) is None): continue video_url = url_re.group(1) if (elems is None or video_url is None): return LogProviderError("Error whilst extracting out / depacking video URL elements", None) # Create dict to map url sub keys to sub values. alphadict = dict() for index_cnt in range(0, 2): index = index_cnt * len(string.digits + string.ascii_lowercase) strindex = str(index_cnt) if index_cnt > 0 else "" for cnt in range(0, len(string.digits + string.ascii_lowercase)): alphadict[strindex + (string.digits + string.ascii_lowercase)[cnt]] = cnt + index def SubElem(matchObj): val = elems[alphadict[matchObj.group(0)]] if (val == ""): val = matchObj.group(0) return val # Sub values into url to get final url. 
final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url) Log(final_url) return ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) raise Exception(msg) return [] \ No newline at end of file diff --git a/Contents/Services/URL/VidHog/ServiceCode.pys b/Contents/Services/URL/VidHog/ServiceCode.pys deleted file mode 100644 index 4e28ad6..0000000 --- a/Contents/Services/URL/VidHog/ServiceCode.pys +++ /dev/null @@ -1 +0,0 @@ -import re, urlparse, cgi, time, urllib, urllib2 from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In VidhHog normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): try: show = Prefs["show_vidhog"] except Exception, ex: show = True if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for VidHog (' + url + ')') # LMWT Plugin should have access to info about this URL if user used plugin to launch video. video = LMWTGetVideoClipObjectFromMediaInfo(url) if video is None: video = VideoClipObject( title = 'VidHog Redirect Page', summary = 'VidHog Redirect Page', thumb = None, ) return video def MediaObjectsForURL(url): # Page flow from start to video is: # - Initial Page # - Countdown Page # - Video Link Page # # This plugin treats the pages as a two step process where each step can be carried # out indepedently or one after the other. # # - To run this a single step, pass in the video URL. 
# - To stop after the timer has been initialised and return the URL of the video link page, # append nowait=true to the query string. # - To retrieve the video after the timer has elapsed, pass in the URL returned by calling this # function with nowait=true. #Log("*********************************************************************") stage1 = True stage2 = True # Work out what parts of the process we need to run. if ("op=download2" in url): # Only intersted in second part of process as passed in URL is for Video Link page. # Note that it's expected that the correct delay has been observed by whoever's passing # in this URL. stage1 = False stage2_URL = url if ("nowait=true" in url): # Caller doesn't want us to wait if countdown page has a delay. stage2 = False # Remove arg from url. url_parts = urlparse.urlparse(url) url_parts = urlparse.ParseResult(url_parts.scheme, url_parts.netloc, url_parts.path, "", "", "") url = url_parts.geturl() if (stage1): # Deal with initial page. try: request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = urllib2.urlopen(request) #Log("Requesting: " + url) soup = BeautifulSoup(response.read()) except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) #Log(str(soup)) # Look for any errors from provider. errors = soup.find('font',{ 'class':'err'}) if (errors is not None): return LogProviderError("Provider reachable but has returned following error: " + errors.string) # Extract out these form elements... formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login'] params = {} try: for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: return LogProviderError("Error whilst retrieving information to go from intial page to countdown page", ex) # Submit form with extracted elements. 
headers = { 'Referer': url } try: soup = BeautifulSoup(HTTP.Request(url,values=params,headers=headers).content) except Exception, ex: return LogProviderError("Error whilst retrieving countdown page (" + url + ")", ex) # Deal with the timer page. # Extract out these form elements... formElems = ['down_direct', 'id', 'method_free', 'method_premium', 'op', 'rand', 'referer'] params = {} try: for formElem in formElems: formElemVal = soup.find('input', {'name' : formElem })['value'] params[formElem] = formElemVal except Exception, ex: return LogProviderError("Error whilst retrieving information to go from countdown page to download page", ex) #Log("*********************************************************************") #Log("*********************************************************************") #Log("Params:" + str(params)) # Extract out delay to wait. try: delay = int(soup.find('span', id='countdown_str').span.string) except (Exception, ex): return LogProviderError("Delay not found on download page. Has something changed?", ex) #Log("Delay: " + str(delay)) # Create a stage 2 URL. This will be broken down again for stage 2. stage2_URL = url + "?" + urllib.urlencode(params) #Log("Stage 2 url: " + stage2_URL) if (not stage2): ret = [] ret.append( MediaObject( parts = [PartObject(key=stage2_URL, duration=delay)], ) ) return ret else: # Looks like the page is happy for us to do the waiting.... so wait. time.sleep(delay) if (stage2): # Breakdown stage2 URL to get params back. url_parts = urlparse.urlparse(stage2_URL) #Log(str(url_parts)) params = cgi.parse_qsl(url_parts.query) # Remove query string arguments we added to form URL to get form URL back. 
url_parts = urlparse.ParseResult(url_parts.scheme, url_parts.netloc, url_parts.path, "", "", "") #Log(str(url_parts)) form_url = url_parts.geturl() #Log("*********************************************************************") #Log("*********************************************************************") #Log("Params:" + str(params)) #Log("Form URL:" + form_url) # Submit form. request = urllib2.Request(url, urllib.urlencode(params)) request.add_header('User-agent', USER_AGENT) request.add_header('Referer', form_url) response = urllib2.urlopen(request) soup = BeautifulSoup(response.read()) #Log(str(soup)) # Extract out video URL. final_url = soup.find('div', { 'class':'content-bg'}).center.strong.a['href'] Log(final_url) LMWTPlaybackStarted(url) ret = [] ret.append( MediaObject( parts = [PartObject(key=final_url)], ) ) return ret def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] # LMWT Plugin specific helper methods. def LMWTPlaybackStarted(url): # Bad things can happen here. Still want to run rest of code if possible though... try: request = urllib2.Request("http://127.0.0.1:32400/video/lmwt/playback/%s" % String.Encode(url)) response = urllib2.urlopen(request) except Exception, ex: pass def LMWTGetVideoClipObjectFromMediaInfo(url): # Bad things can happen here. Still want to run rest of code if possible though... 
try: request = urllib2.Request("http://127.0.0.1:32400/video/lmwt/mediainfo/%s" % String.Encode(url)) mediainfo = JSON.ObjectFromString(urllib2.urlopen(request).read()) return VideoClipObject( title=mediainfo['title'], summary=mediainfo['summary'], art=mediainfo['background'], thumb= mediainfo['poster'], rating = float(mediainfo['rating']), duration=mediainfo['duration'], year=mediainfo['year'], originally_available_at= ( date.fromordinal(mediainfo['release_date']) if ('release_date' in mediainfo and mediainfo['release_date']) else None ), genres=mediainfo['genres'], ) except Exception, ex: return None \ No newline at end of file diff --git a/Contents/Services/URL/VidHog/ServicePrefs.json b/Contents/Services/URL/VidHog/ServicePrefs.json deleted file mode 100644 index 903a60d..0000000 --- a/Contents/Services/URL/VidHog/ServicePrefs.json +++ /dev/null @@ -1 +0,0 @@ -[ { "id": "show_vidhog", "label": "VidHog", "type": "bool", "default": "true", }, ] \ No newline at end of file diff --git a/Contents/Services/URL/VuReel/ServiceCode.pys b/Contents/Services/URL/VuReel/ServiceCode.pys index d3a5dec..98c2bdc 100644 --- a/Contents/Services/URL/VuReel/ServiceCode.pys +++ b/Contents/Services/URL/VuReel/ServiceCode.pys @@ -1 +1 @@ -import re, urllib2 from datetime import date USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In MovShare normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. 
match = re.search("(vureel)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for MovShare (' + url + ')') return VideoClipObject( title = 'VuReel Redirect Page', summary = 'VuReel Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for MovShare (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): HTTP.Headers['User-Agent'] = USER_AGENT # Request Initial Provider page. try: #Log('Requesting ' + url) contents = HTTP.Request(url).content except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Read in file location from video page. try: #Log(contents) final_url = re.search("file: \"(.*)\"", contents).group(1) except Exception, ex: return LogProviderError("Error whilst retrieving File Location. Provider may have changed page layout.", ex) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. oc.user_agent = USER_AGENT return oc def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) return [] \ No newline at end of file +import re, urllib2 from datetime import date USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In MovShare normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): # Extract out domain. 
match = re.search("(vureel)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for MovShare (' + url + ')') return VideoClipObject( title = 'VuReel Redirect Page', summary = 'VuReel Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MediaObjectsForURL for MovShare (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): HTTP.Headers['User-Agent'] = USER_AGENT # Request Initial Provider page. try: #Log('Requesting ' + url) contents = HTTP.Request(url).content except Exception, ex: return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex) # Read in file location from video page. try: #Log(contents) final_url = re.search("file: \"(.*)\"", contents).group(1) except Exception, ex: return LogProviderError("Error whilst retrieving File Location. Provider may have changed page layout.", ex) Log(final_url) oc = ObjectContainer( objects = [ VideoClipObject( items = [ MediaObject( parts = [PartObject(key=final_url)] ) ] ) ] ) # Might as well set a sensible user agent string. 
oc.user_agent = USER_AGENT return oc def LogProviderError(msg="", ex=None): Log("************************** PROVIDER ERROR: " + msg) raise Exception(msg) return [] \ No newline at end of file diff --git a/Contents/Services/URL/Zalaa/ServiceCode.pys b/Contents/Services/URL/Zalaa/ServiceCode.pys index f36694c..97d4ce8 100644 --- a/Contents/Services/URL/Zalaa/ServiceCode.pys +++ b/Contents/Services/URL/Zalaa/ServiceCode.pys @@ -1 +1 @@ -import re, urlparse, cgi, urllib, urllib2, cookielib, urlparse, string from datetime import date from BeautifulSoup import BeautifulSoup USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22' def NormalizeURL(url): #Log("*********** In Zalaa normalizeURL") # Deal with special providerInfo URL built up by plugin to return # info about this provider. For all other normal URLs, do nothing. if ("providerinfo" in url): match = re.search("(zalaa|uploadc)", url.lower()) if (match is None): return url try: show = Prefs["show_" + match.group(1)] except Exception, ex: show = False if (show): return url + "&visible=true" else: return url else: return url def MetadataObjectForURL(url): #Log('In MetadataObjectForURL for Zalaa (' + url + ')') # LMWT Plugin should have access to info about this URL if user used plugin to launch video. return VideoClipObject( title = 'Zalaa Redirect Page', summary = 'Zalaa Redirect Page', thumb = None, ) def MediaObjectsForURL(url): #Log('In MetadataObjectForURL for Zalaa (' + url + ')') return [ MediaObject( parts = [PartObject(key=Callback(PlayVideo, url=url))], ) ] @indirect def PlayVideo(url): #Log('In MediaObjectsForURL for Zalaa (' + url + ')') ### 1st Page # Request provider page. try: #Log('Requesting ' + url) request = urllib2.Request(url) request.add_header('User-agent', USER_AGENT) response = urllib2.urlopen(request) # Collect final url (in case of 302 or the like) and read in page. 
# Plex URL Service for the Zalaa / Uploadc file hosts.
#
# NOTE(review): this module runs inside the Plex plugin framework, which
# injects the globals used below (Prefs, HTTP, Log, Callback, @indirect,
# VideoClipObject, MediaObject, PartObject, ObjectContainer). It cannot be
# run standalone. Python 2 only (urllib2, BeautifulSoup 3).

import re, urlparse, cgi, urllib, urllib2, cookielib, string

from datetime import date

from BeautifulSoup import BeautifulSoup

USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.51.22 (KHTML, like Gecko) Version/5.1.1 Safari/534.51.22'


def NormalizeURL(url):
    """Return the canonical form of a provider URL.

    Deal with the special providerInfo URL built up by the plugin to return
    info about this provider: append "&visible=true" when the matching
    "show_zalaa" / "show_uploadc" preference is enabled. All other normal
    URLs pass through unchanged.
    """
    if ("providerinfo" in url):
        match = re.search(r"(zalaa|uploadc)", url.lower())
        if (match is None):
            return url

        try:
            show = Prefs["show_" + match.group(1)]
        except Exception:
            # Preference not defined for this host; treat as hidden.
            show = False

        if (show):
            return url + "&visible=true"
        else:
            return url
    else:
        return url


def MetadataObjectForURL(url):
    """Return placeholder metadata; the real item metadata comes from the
    plugin that launched the video, not from this redirect page."""
    return VideoClipObject(
        title='Zalaa Redirect Page',
        summary='Zalaa Redirect Page',
        thumb=None,
    )


def MediaObjectsForURL(url):
    """Return a single MediaObject whose part is resolved lazily via the
    PlayVideo callback (the host requires multi-page navigation)."""
    return [
        MediaObject(
            parts=[PartObject(key=Callback(PlayVideo, url=url))],
        )
    ]


@indirect
def PlayVideo(url):
    """Navigate the host's interstitial pages and return the final video URL.

    Flow: fetch the initial page, re-submit its download form (2nd page),
    optionally follow a newer 3rd "download" page, then extract the video
    URL either by unpacking the p,a,c,k,e,d-obfuscated script or from the
    FLV player config.

    Raises (via LogProviderError) if any navigation or extraction step fails.
    """
    ### 1st Page - request the provider page directly so we can observe the
    ### final URL after any redirects.
    try:
        request = urllib2.Request(url)
        request.add_header('User-agent', USER_AGENT)
        response = urllib2.urlopen(request)

        # Collect final url (in case of 302 or the like) and read in page.
        provider_url = response.geturl()
        soup = BeautifulSoup(response.read())
    except Exception as ex:
        return LogProviderError("Error whilst retrieving initial provider page (" + url + ")", ex)

    # Extract out these form elements from the provider page...
    try:
        formElems = ['op', 'id', 'fname', 'method_free', 'referer', 'usr_login', 'ipcount_val']
        params = {}
        for formElem in formElems:
            params[formElem] = soup.find('input', {'name': formElem})['value']
    except Exception as ex:
        return LogProviderError("Error whilst retrieving information to go from initial page to next page. Page URL was: " + url, ex)

    ### 2nd Page - submit the form from the 1st page.
    try:
        HTTP.Headers['User-agent'] = USER_AGENT
        headers = {'Referer': provider_url}
        content = HTTP.Request(provider_url, cacheTime=0, values=params, headers=headers).content
    except Exception as ex:
        return LogProviderError("Error whilst retrieving 2nd page (" + provider_url + ")", ex)

    soup = BeautifulSoup(content)

    ### 3rd Page (if needed....)
    # This looks to be a new page but may not have been rolled out everywhere,
    # so see if it's part of the nav we're going through rather than blindly
    # assuming it'll be there.
    if (soup.find('td', {'id': 'btndnlbt'}) is not None):
        # Initialise before the try so the error message below can't hit a
        # NameError when the link extraction itself is what failed.
        dld_url = None
        try:
            headers = {'Referer': provider_url}
            dld_url = soup.find('td', {'id': 'btndnlbt'}).a['href']
            content = HTTP.Request(dld_url, cacheTime=0, headers=headers).content
        except Exception as ex:
            return LogProviderError("Error whilst retrieving 3rd page (" + str(dld_url) + ")", ex)

        soup = BeautifulSoup(content)

    final_url = None

    # See how we're going to retrieve the file name....
    if (soup.find('span', {'id': 'flvplayerid'})) is None:
        # Obfuscated page: the video URL is hidden in a p,a,c,k,e,d packed
        # script; rebuild it by substituting the packed tokens back in.
        try:
            script = soup.find('script', text=re.compile(r"function\(p,a,c,k,e,d\)"))

            # Look for substitution values.
            elems = re.search(r"\d{2},'([^']*)'\.split", script).group(1).split('|')

            # Look for url to substitute values into.
            video_url = re.search(r"([0-9a-z]*://[0-9a-z]*\.[0-9a-z]*\.[0-9a-z]*\:[0-9a-z]*/([0-9a-z]*/?)*[0-9a-z.\-_ ()]*)", script)

            # Create dict to map url sub keys ("0".."z", "10".."1z") to the
            # index of their substitution value.
            charset = string.digits + string.ascii_lowercase
            alphadict = dict()
            for index_cnt in range(0, 2):
                index = index_cnt * len(charset)
                strindex = str(index_cnt) if index_cnt > 0 else ""
                for cnt in range(0, len(charset)):
                    alphadict[strindex + charset[cnt]] = cnt + index

            def SubElem(matchObj):
                # Empty substitution value means the token stands for itself.
                val = elems[alphadict[matchObj.group(0)]]
                if (val == ""):
                    val = matchObj.group(0)
                return val

            # Sub values into url.
            final_url = re.sub("[0-9a-z]{1,2}", SubElem, video_url.group(1))
        except Exception as ex:
            return LogProviderError("Error whilst unpacking video URL.", ex)
    else:
        # Plain page: the video URL sits in the FLV player config.
        file_match = re.search(r"s1\.addVariable\('file','([^']*)'\);", content)
        if file_match is None:
            return LogProviderError("Error whilst extracting out video URL from FLV player config.")
        final_url = file_match.group(1)

    Log(final_url)
    return ObjectContainer(
        objects=[
            VideoClipObject(
                items=[
                    MediaObject(
                        parts=[PartObject(key=final_url)]
                    )
                ]
            )
        ]
    )


def LogProviderError(msg="", ex=None):
    """Log a provider navigation/extraction failure and raise.

    The original exception (if any) is included in the log so failures are
    diagnosable; raising lets the framework surface the error to the user.
    """
    if (ex is not None):
        Log("************************** PROVIDER ERROR: " + msg + " (" + str(ex) + ")")
    else:
        Log("************************** PROVIDER ERROR: " + msg)
    raise Exception(msg)