diff --git a/README.md b/README.md index 7e41be902..c2909279d 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ NewPipe Extractor is available at JitPack's Maven repo. If you're using Gradle, you could add NewPipe Extractor as a dependency with the following steps: 1. Add `maven { url 'https://jitpack.io' }` to the `repositories` in your `build.gradle`. -2. Add `implementation 'com.github.TeamNewPipe:NewPipeExtractor:v0.19.5'`the `dependencies` in your `build.gradle`. Replace `v0.19.5` with the latest release. +2. Add `implementation 'com.github.TeamNewPipe:NewPipeExtractor:v0.19.7'` to the `dependencies` in your `build.gradle`. Replace `v0.19.7` with the latest release. ### Testing changes diff --git a/build.gradle b/build.gradle index 9a2f92e14..70da643be 100644 --- a/build.gradle +++ b/build.gradle @@ -5,7 +5,7 @@ allprojects { sourceCompatibility = 1.7 targetCompatibility = 1.7 - version 'v0.19.5' + version 'v0.19.7' group 'com.github.TeamNewPipe' repositories { @@ -15,7 +15,7 @@ allprojects { } dependencies { - implementation project(':extractor') + api project(':extractor') implementation project(':timeago-parser') } diff --git a/extractor/build.gradle b/extractor/build.gradle index 205115870..b8f668ef0 100644 --- a/extractor/build.gradle +++ b/extractor/build.gradle @@ -8,4 +8,5 @@ dependencies { implementation 'org.nibor.autolink:autolink:0.10.0' testImplementation 'junit:junit:4.13' + testImplementation "com.squareup.okhttp3:okhttp:3.12.11" } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/ListExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/ListExtractor.java index aae591eb6..e53dbbef9 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/ListExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/ListExtractor.java @@ -2,7 +2,6 @@ package org.schabi.newpipe.extractor; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import 
org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; -import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; import java.util.Collections; @@ -10,13 +9,11 @@ import java.util.List; import javax.annotation.Nonnull; -import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; /** * Base class to extractors that have a list (e.g. playlists, users). */ public abstract class ListExtractor extends Extractor { - /** * Constant that should be returned whenever * a list has an unknown number of items. @@ -38,36 +35,22 @@ public abstract class ListExtractor extends Extractor { } /** - * A {@link InfoItemsPage InfoItemsPage} corresponding to the initial page where the items are from the initial request and - * the nextPageUrl relative to it. + * A {@link InfoItemsPage InfoItemsPage} corresponding to the initial page + * where the items are from the initial request and the nextPage relative to it. * * @return a {@link InfoItemsPage} corresponding to the initial page */ @Nonnull public abstract InfoItemsPage getInitialPage() throws IOException, ExtractionException; - /** - * Returns an url that can be used to get the next page relative to the initial one. - *

Usually, these links will only work in the implementation itself.

- * - * @return an url pointing to the next page relative to the initial page - * @see #getPage(String) - */ - public abstract String getNextPageUrl() throws IOException, ExtractionException; - /** * Get a list of items corresponding to the specific requested page. * - * @param pageUrl any page url got from the exclusive implementation of the list extractor + * @param page any page got from the exclusive implementation of the list extractor * @return a {@link InfoItemsPage} corresponding to the requested page - * @see #getNextPageUrl() - * @see InfoItemsPage#getNextPageUrl() + * @see InfoItemsPage#getNextPage() */ - public abstract InfoItemsPage getPage(final String pageUrl) throws IOException, ExtractionException; - - public boolean hasNextPage() throws IOException, ExtractionException { - return !isNullOrEmpty(getNextPageUrl()); - } + public abstract InfoItemsPage getPage(final Page page) throws IOException, ExtractionException; @Override public ListLinkHandler getLinkHandler() { @@ -80,23 +63,22 @@ public abstract class ListExtractor extends Extractor { /** * A class that is used to wrap a list of gathered items and eventual errors, it - * also contains a field that points to the next available page ({@link #nextPageUrl}). + * also contains a field that points to the next available page ({@link #nextPage}). */ public static class InfoItemsPage { private static final InfoItemsPage EMPTY = - new InfoItemsPage<>(Collections.emptyList(), "", Collections.emptyList()); + new InfoItemsPage<>(Collections.emptyList(), null, Collections.emptyList()); /** * A convenient method that returns a representation of an empty page. * - * @return a type-safe page with the list of items and errors empty and the nextPageUrl set to an empty string. + * @return a type-safe page with the list of items and errors empty and the nextPage set to {@code null}. 
*/ public static InfoItemsPage emptyPage() { //noinspection unchecked return (InfoItemsPage) EMPTY; } - /** * The current list of items of this page */ @@ -105,40 +87,40 @@ public abstract class ListExtractor extends Extractor { /** * Url pointing to the next page relative to this one * - * @see ListExtractor#getPage(String) + * @see ListExtractor#getPage(Page) + * @see Page */ - private final String nextPageUrl; + private final Page nextPage; /** * Errors that happened during the extraction */ private final List errors; - public InfoItemsPage(InfoItemsCollector collector, String nextPageUrl) { - this(collector.getItems(), nextPageUrl, collector.getErrors()); + public InfoItemsPage(InfoItemsCollector collector, Page nextPage) { + this(collector.getItems(), nextPage, collector.getErrors()); } - public InfoItemsPage(List itemsList, String nextPageUrl, List errors) { + public InfoItemsPage(List itemsList, Page nextPage, List errors) { this.itemsList = itemsList; - this.nextPageUrl = nextPageUrl; + this.nextPage = nextPage; this.errors = errors; } public boolean hasNextPage() { - return !isNullOrEmpty(nextPageUrl); + return Page.isValid(nextPage); } public List getItems() { return itemsList; } - public String getNextPageUrl() { - return nextPageUrl; + public Page getNextPage() { + return nextPage; } public List getErrors() { return errors; } } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/ListInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/ListInfo.java index 38177e1e4..c06c24c44 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/ListInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/ListInfo.java @@ -4,11 +4,9 @@ import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; import java.util.List; -import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; - public abstract class ListInfo extends Info { private List relatedItems; - private String nextPageUrl = null; + private Page 
nextPage = null; private final List contentFilters; private final String sortFilter; @@ -39,15 +37,15 @@ public abstract class ListInfo extends Info { } public boolean hasNextPage() { - return !isNullOrEmpty(nextPageUrl); + return Page.isValid(nextPage); } - public String getNextPageUrl() { - return nextPageUrl; + public Page getNextPage() { + return nextPage; } - public void setNextPageUrl(String pageUrl) { - this.nextPageUrl = pageUrl; + public void setNextPage(Page page) { + this.nextPage = page; } public List getContentFilters() { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/Page.java b/extractor/src/main/java/org/schabi/newpipe/extractor/Page.java new file mode 100644 index 000000000..e4faae778 --- /dev/null +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/Page.java @@ -0,0 +1,62 @@ +package org.schabi.newpipe.extractor; + +import java.io.Serializable; +import java.util.List; +import java.util.Map; + +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; + +public class Page implements Serializable { + private final String url; + private final String id; + private final List ids; + private final Map cookies; + + public Page(final String url, final String id, final List ids, final Map cookies) { + this.url = url; + this.id = id; + this.ids = ids; + this.cookies = cookies; + } + + public Page(final String url) { + this(url, null, null, null); + } + + public Page(final String url, final String id) { + this(url, id, null, null); + } + + public Page(final String url, final Map cookies) { + this(url, null, null, cookies); + } + + public Page(final List ids) { + this(null, null, ids, null); + } + + public Page(final List ids, final Map cookies) { + this(null, null, ids, cookies); + } + + public String getUrl() { + return url; + } + + public String getId() { + return id; + } + + public List getIds() { + return ids; + } + + public Map getCookies() { + return cookies; + } + + public static boolean isValid(final Page page) { 
+ return page != null && (!isNullOrEmpty(page.getUrl()) + || !isNullOrEmpty(page.getIds())); + } +} diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/channel/ChannelInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/channel/ChannelInfo.java index 52f18eadc..6863e6028 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/channel/ChannelInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/channel/ChannelInfo.java @@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.channel; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage; import org.schabi.newpipe.extractor.ListInfo; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; @@ -49,8 +50,8 @@ public class ChannelInfo extends ListInfo { public static InfoItemsPage getMoreItems(StreamingService service, String url, - String pageUrl) throws IOException, ExtractionException { - return service.getChannelExtractor(url).getPage(pageUrl); + Page page) throws IOException, ExtractionException { + return service.getChannelExtractor(url).getPage(page); } public static ChannelInfo getInfo(ChannelExtractor extractor) throws IOException, ExtractionException { @@ -81,7 +82,7 @@ public class ChannelInfo extends ListInfo { final InfoItemsPage itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor); info.setRelatedItems(itemsPage.getItems()); - info.setNextPageUrl(itemsPage.getNextPageUrl()); + info.setNextPage(itemsPage.getNextPage()); try { info.setSubscriberCount(extractor.getSubscriberCount()); diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/comments/CommentsInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/comments/CommentsInfo.java index e07c67464..e5f25527e 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/comments/CommentsInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/comments/CommentsInfo.java @@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.comments; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage; import org.schabi.newpipe.extractor.ListInfo; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; @@ -39,23 +40,23 @@ public class CommentsInfo extends ListInfo { InfoItemsPage initialCommentsPage = ExtractorHelper.getItemsPageOrLogError(commentsInfo, commentsExtractor); commentsInfo.setRelatedItems(initialCommentsPage.getItems()); - commentsInfo.setNextPageUrl(initialCommentsPage.getNextPageUrl()); + commentsInfo.setNextPage(initialCommentsPage.getNextPage()); return commentsInfo; } - public static InfoItemsPage getMoreItems(CommentsInfo commentsInfo, String pageUrl) + public static InfoItemsPage getMoreItems(CommentsInfo commentsInfo, Page page) throws ExtractionException, IOException { - return getMoreItems(NewPipe.getService(commentsInfo.getServiceId()), commentsInfo, pageUrl); + return getMoreItems(NewPipe.getService(commentsInfo.getServiceId()), commentsInfo, page); } public static InfoItemsPage getMoreItems(StreamingService service, CommentsInfo commentsInfo, - String pageUrl) throws IOException, ExtractionException { + Page page) throws IOException, ExtractionException { if (null == commentsInfo.getCommentsExtractor()) { commentsInfo.setCommentsExtractor(service.getCommentsExtractor(commentsInfo.getUrl())); commentsInfo.getCommentsExtractor().fetchPage(); } - return commentsInfo.getCommentsExtractor().getPage(pageUrl); + return commentsInfo.getCommentsExtractor().getPage(page); } private transient CommentsExtractor commentsExtractor; diff --git 
a/extractor/src/main/java/org/schabi/newpipe/extractor/feed/FeedInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/feed/FeedInfo.java index f361cec7e..03fe78f6d 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/feed/FeedInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/feed/FeedInfo.java @@ -45,7 +45,7 @@ public class FeedInfo extends ListInfo { final InfoItemsPage itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor); info.setRelatedItems(itemsPage.getItems()); - info.setNextPageUrl(itemsPage.getNextPageUrl()); + info.setNextPage(itemsPage.getNextPage()); return info; } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskInfo.java index 521f6b9e9..e054214e2 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskInfo.java @@ -23,6 +23,7 @@ package org.schabi.newpipe.extractor.kiosk; import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListInfo; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.exceptions.ParsingException; @@ -33,18 +34,17 @@ import org.schabi.newpipe.extractor.utils.ExtractorHelper; import java.io.IOException; public class KioskInfo extends ListInfo { - private KioskInfo(int serviceId, ListLinkHandler linkHandler, String name) throws ParsingException { super(serviceId, linkHandler, name); } public static ListExtractor.InfoItemsPage getMoreItems(StreamingService service, String url, - String pageUrl) + Page page) throws IOException, ExtractionException { KioskList kl = service.getKioskList(); - KioskExtractor extractor = kl.getExtractorByUrl(url, pageUrl); - 
return extractor.getPage(pageUrl); + KioskExtractor extractor = kl.getExtractorByUrl(url, page); + return extractor.getPage(page); } public static KioskInfo getInfo(String url) throws IOException, ExtractionException { @@ -71,7 +71,7 @@ public class KioskInfo extends ListInfo { final ListExtractor.InfoItemsPage itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor); info.setRelatedItems(itemsPage.getItems()); - info.setNextPageUrl(itemsPage.getNextPageUrl()); + info.setNextPage(itemsPage.getNextPage()); return info; } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskList.java b/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskList.java index 878fa8ce5..5eef4adb6 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskList.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/kiosk/KioskList.java @@ -1,6 +1,7 @@ package org.schabi.newpipe.extractor.kiosk; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory; @@ -59,23 +60,23 @@ public class KioskList { public KioskExtractor getDefaultKioskExtractor() throws ExtractionException, IOException { - return getDefaultKioskExtractor(""); + return getDefaultKioskExtractor(null); } - public KioskExtractor getDefaultKioskExtractor(String nextPageUrl) + public KioskExtractor getDefaultKioskExtractor(Page nextPage) throws ExtractionException, IOException { - return getDefaultKioskExtractor(nextPageUrl, NewPipe.getPreferredLocalization()); + return getDefaultKioskExtractor(nextPage, NewPipe.getPreferredLocalization()); } - public KioskExtractor getDefaultKioskExtractor(String nextPageUrl, Localization localization) + public KioskExtractor getDefaultKioskExtractor(Page nextPage, Localization localization) throws 
ExtractionException, IOException { if (defaultKiosk != null && !defaultKiosk.equals("")) { - return getExtractorById(defaultKiosk, nextPageUrl, localization); + return getExtractorById(defaultKiosk, nextPage, localization); } else { if (!kioskList.isEmpty()) { // if not set get any entry Object[] keySet = kioskList.keySet().toArray(); - return getExtractorById(keySet[0].toString(), nextPageUrl, localization); + return getExtractorById(keySet[0].toString(), nextPage, localization); } else { return null; } @@ -86,12 +87,12 @@ public class KioskList { return defaultKiosk; } - public KioskExtractor getExtractorById(String kioskId, String nextPageUrl) + public KioskExtractor getExtractorById(String kioskId, Page nextPage) throws ExtractionException, IOException { - return getExtractorById(kioskId, nextPageUrl, NewPipe.getPreferredLocalization()); + return getExtractorById(kioskId, nextPage, NewPipe.getPreferredLocalization()); } - public KioskExtractor getExtractorById(String kioskId, String nextPageUrl, Localization localization) + public KioskExtractor getExtractorById(String kioskId, Page nextPage, Localization localization) throws ExtractionException, IOException { KioskEntry ke = kioskList.get(kioskId); if (ke == null) { @@ -111,17 +112,17 @@ public class KioskList { return kioskList.keySet(); } - public KioskExtractor getExtractorByUrl(String url, String nextPageUrl) + public KioskExtractor getExtractorByUrl(String url, Page nextPage) throws ExtractionException, IOException { - return getExtractorByUrl(url, nextPageUrl, NewPipe.getPreferredLocalization()); + return getExtractorByUrl(url, nextPage, NewPipe.getPreferredLocalization()); } - public KioskExtractor getExtractorByUrl(String url, String nextPageUrl, Localization localization) + public KioskExtractor getExtractorByUrl(String url, Page nextPage, Localization localization) throws ExtractionException, IOException { for (Map.Entry e : kioskList.entrySet()) { KioskEntry ke = e.getValue(); if 
(ke.handlerFactory.acceptUrl(url)) { - return getExtractorById(ke.handlerFactory.getId(url), nextPageUrl, localization); + return getExtractorById(ke.handlerFactory.getId(url), nextPage, localization); } } throw new ExtractionException("Could not find a kiosk that fits to the url: " + url); diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/playlist/PlaylistInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/playlist/PlaylistInfo.java index 089b1de55..873d879ec 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/playlist/PlaylistInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/playlist/PlaylistInfo.java @@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.playlist; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage; import org.schabi.newpipe.extractor.ListInfo; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.exceptions.ParsingException; @@ -32,8 +33,8 @@ public class PlaylistInfo extends ListInfo { public static InfoItemsPage getMoreItems(StreamingService service, String url, - String pageUrl) throws IOException, ExtractionException { - return service.getPlaylistExtractor(url).getPage(pageUrl); + Page page) throws IOException, ExtractionException { + return service.getPlaylistExtractor(url).getPage(page); } /** @@ -112,7 +113,7 @@ public class PlaylistInfo extends ListInfo { final InfoItemsPage itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor); info.setRelatedItems(itemsPage.getItems()); - info.setNextPageUrl(itemsPage.getNextPageUrl()); + info.setNextPage(itemsPage.getNextPage()); return info; } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/search/SearchInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/search/SearchInfo.java index 
b2e072cce..8967b0a89 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/search/SearchInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/search/SearchInfo.java @@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.search; import org.schabi.newpipe.extractor.InfoItem; import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListInfo; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler; @@ -10,9 +11,7 @@ import org.schabi.newpipe.extractor.utils.ExtractorHelper; import java.io.IOException; - public class SearchInfo extends ListInfo { - private String searchString; private String searchSuggestion; private boolean isCorrectedSearch; @@ -55,7 +54,7 @@ public class SearchInfo extends ListInfo { ListExtractor.InfoItemsPage page = ExtractorHelper.getItemsPageOrLogError(info, extractor); info.setRelatedItems(page.getItems()); - info.setNextPageUrl(page.getNextPageUrl()); + info.setNextPage(page.getNextPage()); return info; } @@ -63,9 +62,9 @@ public class SearchInfo extends ListInfo { public static ListExtractor.InfoItemsPage getMoreItems(StreamingService service, SearchQueryHandler query, - String pageUrl) + Page page) throws IOException, ExtractionException { - return service.getSearchExtractor(query).getPage(pageUrl); + return service.getSearchExtractor(query).getPage(page); } // Getter diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampChannelExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampChannelExtractor.java index 5a8d4d5f6..74fa8d152 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampChannelExtractor.java +++ 
b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampChannelExtractor.java @@ -4,6 +4,7 @@ package org.schabi.newpipe.extractor.services.bandcamp.extractors; import com.grack.nanojson.*; import org.jsoup.Jsoup; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelExtractor; import org.schabi.newpipe.extractor.downloader.Downloader; @@ -112,12 +113,7 @@ public class BandcampChannelExtractor extends ChannelExtractor { } @Override - public String getNextPageUrl() { - return null; - } - - @Override - public InfoItemsPage getPage(String pageUrl) { + public InfoItemsPage getPage(Page page) { return null; } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampFeaturedExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampFeaturedExtractor.java index 82905d239..53781f378 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampFeaturedExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampFeaturedExtractor.java @@ -6,8 +6,7 @@ import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; -import org.schabi.newpipe.extractor.InfoItem; -import org.schabi.newpipe.extractor.InfoItemsCollector; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -78,12 +77,7 @@ public class BandcampFeaturedExtractor extends KioskExtractor } @Override - public String getNextPageUrl() { - return null; - } - - @Override - public InfoItemsPage getPage(String pageUrl) { + public InfoItemsPage 
getPage(Page page) { return null; } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampPlaylistExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampPlaylistExtractor.java index 7b9da51cd..f06d51d38 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampPlaylistExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampPlaylistExtractor.java @@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParserException; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException; @@ -143,19 +144,14 @@ public class BandcampPlaylistExtractor extends PlaylistExtractor { return new InfoItemsPage<>(collector, null); } + @Override + public InfoItemsPage getPage(Page page) { + return null; + } + @Nonnull @Override public String getName() throws ParsingException { return name; } - - @Override - public String getNextPageUrl() { - return null; - } - - @Override - public InfoItemsPage getPage(String pageUrl) { - return null; - } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioExtractor.java index 55a16edbe..6e837d44f 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioExtractor.java @@ -8,6 +8,7 @@ import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; import 
org.schabi.newpipe.extractor.InfoItem; import org.schabi.newpipe.extractor.InfoItemsCollector; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -71,12 +72,7 @@ public class BandcampRadioExtractor extends KioskExtractor { } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - return null; - } - - @Override - public InfoItemsPage getPage(String pageUrl) { + public InfoItemsPage getPage(Page page) { return null; } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioStreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioStreamExtractor.java index 8a69931c0..360009ea5 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioStreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampRadioStreamExtractor.java @@ -58,7 +58,7 @@ public class BandcampRadioStreamExtractor extends BandcampStreamExtractor { @Override public String getUploaderUrl() { return Jsoup.parse(showInfo.getString("image_caption")) - .getElementsByTag("a").first().attr("href"); + .getElementsByTag("a").first().attr("href").trim(); } @Nonnull diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampSearchExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampSearchExtractor.java index 4b5672b49..9deb98c0e 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampSearchExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampSearchExtractor.java @@ -2,18 +2,19 @@ package 
org.schabi.newpipe.extractor.services.bandcamp.extractors; +import edu.umd.cs.findbugs.annotations.NonNull; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; import org.schabi.newpipe.extractor.InfoItem; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler; import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector; import org.schabi.newpipe.extractor.search.SearchExtractor; -import org.schabi.newpipe.extractor.services.bandcamp.extractors.streaminfoitem.BandcampPlaylistStreamInfoItemExtractor; import org.schabi.newpipe.extractor.services.bandcamp.extractors.streaminfoitem.BandcampSearchStreamInfoItemExtractor; import javax.annotation.Nonnull; @@ -25,9 +26,10 @@ public class BandcampSearchExtractor extends SearchExtractor { super(service, linkHandler); } + @NonNull @Override public String getSearchSuggestion() { - return null; + return ""; } @Override @@ -35,10 +37,9 @@ public class BandcampSearchExtractor extends SearchExtractor { return false; } - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { + public InfoItemsPage getPage(Page page) throws IOException, ExtractionException { // okay apparently this is where we DOWNLOAD the page and then COMMIT its ENTRIES to an INFOITEMPAGE - String html = getDownloader().get(pageUrl).responseBody(); + String html = getDownloader().get(page.getUrl()).responseBody(); InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId()); @@ -85,8 +86,8 @@ public class BandcampSearchExtractor extends SearchExtractor { // Find current page int currentPage = -1; for (int i = 0; i < pages.size(); i++) { - Element page = pages.get(i); - if 
(page.getElementsByTag("span").size() > 0) { + Element pageElement = pages.get(i); + if (pageElement.getElementsByTag("span").size() > 0) { currentPage = i + 1; break; } @@ -97,23 +98,17 @@ public class BandcampSearchExtractor extends SearchExtractor { String nextUrl = null; if (currentPage < pages.size()) { - nextUrl = pageUrl.substring(0, pageUrl.length() - 1) + (currentPage + 1); + nextUrl = page.getUrl().substring(0, page.getUrl().length() - 1) + (currentPage + 1); } - return new InfoItemsPage<>(collector, nextUrl); + return new InfoItemsPage<>(collector, new Page(nextUrl)); } @Nonnull @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - return getPage(getUrl()); - } - - @Override - public String getNextPageUrl() throws ExtractionException { - String url = getUrl(); - return url.substring(0, url.length() - 1).concat("2"); + return getPage(new Page(getUrl())); } @Override diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampStreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampStreamExtractor.java index 282259275..a0da730fc 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampStreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/bandcamp/extractors/BandcampStreamExtractor.java @@ -238,11 +238,6 @@ public class BandcampStreamExtractor extends StreamExtractor { return StreamType.AUDIO_STREAM; } - @Override - public StreamInfoItem getNextStream() { - return null; - } - @Override public StreamInfoItemsCollector getRelatedStreams() { return null; diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceExtractor.java index f11c13c22..7d39eacb9 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceExtractor.java @@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelExtractor; import org.schabi.newpipe.extractor.downloader.Downloader; @@ -79,13 +80,8 @@ public class MediaCCCConferenceExtractor extends ChannelExtractor { } @Override - public String getNextPageUrl() { - return null; - } - - @Override - public InfoItemsPage getPage(final String pageUrl) { - return null; + public InfoItemsPage getPage(final Page page) { + return InfoItemsPage.emptyPage(); } @Override diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceKiosk.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceKiosk.java index 010d3881a..8ca272924 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceKiosk.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCConferenceKiosk.java @@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelInfoItem; import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector; @@ -37,16 +38,12 @@ public class MediaCCCConferenceKiosk extends KioskExtractor { collector.commit(new MediaCCCConferenceInfoItemExtractor(conferences.getObject(i))); } - return new 
InfoItemsPage<>(collector, ""); + return new InfoItemsPage<>(collector, null); } @Override - public String getNextPageUrl() { - return ""; - } - @Override - public InfoItemsPage getPage(final String pageUrl) { + public InfoItemsPage getPage(final Page page) { return InfoItemsPage.emptyPage(); } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCSearchExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCSearchExtractor.java index 914c77497..676a89e8c 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCSearchExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCSearchExtractor.java @@ -6,6 +6,7 @@ import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; import org.schabi.newpipe.extractor.InfoItem; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelInfoItem; import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor; @@ -80,12 +81,7 @@ public class MediaCCCSearchExtractor extends SearchExtractor { } @Override - public String getNextPageUrl() { - return ""; - } - - @Override - public InfoItemsPage getPage(final String pageUrl) { + public InfoItemsPage getPage(final Page page) { return InfoItemsPage.emptyPage(); } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCStreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCStreamExtractor.java index 93772608e..042c5cd18 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCStreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/media_ccc/extractors/MediaCCCStreamExtractor.java @@ -15,7 
+15,6 @@ import org.schabi.newpipe.extractor.localization.DateWrapper; import org.schabi.newpipe.extractor.stream.AudioStream; import org.schabi.newpipe.extractor.stream.Description; import org.schabi.newpipe.extractor.stream.StreamExtractor; -import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; import org.schabi.newpipe.extractor.stream.StreamType; import org.schabi.newpipe.extractor.stream.SubtitlesStream; @@ -215,11 +214,6 @@ public class MediaCCCStreamExtractor extends StreamExtractor { return StreamType.VIDEO_STREAM; } - @Override - public StreamInfoItem getNextStream() { - return null; - } - @Override public StreamInfoItemsCollector getRelatedStreams() { return new StreamInfoItemsCollector(getServiceId()); diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/PeertubeParsingHelper.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/PeertubeParsingHelper.java index f752ecfef..5c6ceac42 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/PeertubeParsingHelper.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/PeertubeParsingHelper.java @@ -1,9 +1,14 @@ package org.schabi.newpipe.extractor.services.peertube; +import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; +import org.schabi.newpipe.extractor.InfoItemsCollector; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException; import org.schabi.newpipe.extractor.exceptions.ParsingException; +import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeStreamInfoItemExtractor; +import org.schabi.newpipe.extractor.utils.JsonUtils; import org.schabi.newpipe.extractor.utils.Parser; import org.schabi.newpipe.extractor.utils.Utils; @@ -14,7 +19,6 @@ import java.util.Date; import java.util.TimeZone; public class PeertubeParsingHelper { - 
public static final String START_KEY = "start"; public static final String COUNT_KEY = "count"; public static final int ITEMS_PER_PAGE = 12; @@ -23,17 +27,17 @@ public class PeertubeParsingHelper { private PeertubeParsingHelper() { } - public static void validate(JsonObject json) throws ContentNotAvailableException { - String error = json.getString("error"); + public static void validate(final JsonObject json) throws ContentNotAvailableException { + final String error = json.getString("error"); if (!Utils.isBlank(error)) { throw new ContentNotAvailableException(error); } } - public static Calendar parseDateFrom(String textualUploadDate) throws ParsingException { - Date date; + public static Calendar parseDateFrom(final String textualUploadDate) throws ParsingException { + final Date date; try { - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.S'Z'"); + final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.S'Z'"); sdf.setTimeZone(TimeZone.getTimeZone("GMT")); date = sdf.parse(textualUploadDate); } catch (ParseException e) { @@ -45,26 +49,42 @@ public class PeertubeParsingHelper { return uploadDate; } - public static String getNextPageUrl(String prevPageUrl, long total) { - String prevStart; + public static Page getNextPage(final String prevPageUrl, final long total) { + final String prevStart; try { prevStart = Parser.matchGroup1(START_PATTERN, prevPageUrl); } catch (Parser.RegexException e) { - return ""; + return null; } - if (Utils.isBlank(prevStart)) return ""; - long nextStart = 0; + if (Utils.isBlank(prevStart)) return null; + final long nextStart; try { nextStart = Long.parseLong(prevStart) + ITEMS_PER_PAGE; } catch (NumberFormatException e) { - return ""; + return null; } if (nextStart >= total) { - return ""; + return null; } else { - return prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + nextStart); + return new Page(prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + nextStart)); } } 
+ public static void collectStreamsFrom(final InfoItemsCollector collector, final JsonObject json, final String baseUrl) throws ParsingException { + final JsonArray contents; + try { + contents = (JsonArray) JsonUtils.getValue(json, "data"); + } catch (Exception e) { + throw new ParsingException("Unable to extract list info", e); + } + + for (final Object c : contents) { + if (c instanceof JsonObject) { + final JsonObject item = (JsonObject) c; + final PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); + collector.commit(extractor); + } + } + } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeAccountExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeAccountExtractor.java index 7eabcc383..49978e889 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeAccountExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeAccountExtractor.java @@ -1,10 +1,10 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; -import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelExtractor; import org.schabi.newpipe.extractor.downloader.Downloader; @@ -20,23 +20,23 @@ import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.*; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE; +import static 
org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.collectStreamsFrom; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class PeertubeAccountExtractor extends ChannelExtractor { - - private InfoItemsPage initPage; - private long total; - private JsonObject json; private final String baseUrl; - public PeertubeAccountExtractor(StreamingService service, ListLinkHandler linkHandler) throws ParsingException { + public PeertubeAccountExtractor(final StreamingService service, final ListLinkHandler linkHandler) throws ParsingException { super(service, linkHandler); this.baseUrl = getBaseUrl(); } @Override - public String getAvatarUrl() throws ParsingException { + public String getAvatarUrl() { String value; try { value = JsonUtils.getString(json, "avatar.path"); @@ -47,7 +47,7 @@ public class PeertubeAccountExtractor extends ChannelExtractor { } @Override - public String getBannerUrl() throws ParsingException { + public String getBannerUrl() { return null; } @@ -57,13 +57,12 @@ public class PeertubeAccountExtractor extends ChannelExtractor { } @Override - public long getSubscriberCount() throws ParsingException { - Number number = JsonUtils.getNumber(json, "followersCount"); - return number.longValue(); + public long getSubscriberCount() { + return json.getLong("followersCount"); } @Override - public String getDescription() throws ParsingException { + public String getDescription() { try { return JsonUtils.getString(json, "description"); } catch (ParsingException e) { @@ -72,93 +71,73 @@ public class PeertubeAccountExtractor extends ChannelExtractor { } @Override - public String getParentChannelName() throws ParsingException { + public String getParentChannelName() { return ""; } @Override - public String getParentChannelUrl() throws ParsingException { + public String getParentChannelUrl() { return ""; } @Override - public String 
getParentChannelAvatarUrl() throws ParsingException { + public String getParentChannelAvatarUrl() { return ""; } @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - super.fetchPage(); - return initPage; - } - - private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonObject json, String pageUrl) throws ParsingException { - JsonArray contents; - try { - contents = (JsonArray) JsonUtils.getValue(json, "data"); - } catch (Exception e) { - throw new ParsingException("unable to extract channel streams", e); - } - - for (Object c : contents) { - if (c instanceof JsonObject) { - final JsonObject item = (JsonObject) c; - PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); - collector.commit(extractor); - } - } - + final String pageUrl = getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; + return getPage(new Page(pageUrl)); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - super.fetchPage(); - return initPage.getNextPageUrl(); - } + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } + + final Response response = getDownloader().get(page.getUrl()); - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Response response = getDownloader().get(pageUrl); JsonObject json = null; if (response != null && !Utils.isBlank(response.responseBody())) { try { json = JsonParser.object().from(response.responseBody()); } catch (Exception e) { - throw new ParsingException("Could not parse json data for kiosk info", e); + throw new ParsingException("Could not parse json data for account info", e); } } - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); if (json != null) { 
PeertubeParsingHelper.validate(json); - total = JsonUtils.getNumber(json, "total").longValue(); - collectStreamsFrom(collector, json, pageUrl); + final long total = json.getLong("total"); + + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + collectStreamsFrom(collector, json, getBaseUrl()); + + return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total)); } else { - throw new ExtractionException("Unable to get PeerTube kiosk info"); + throw new ExtractionException("Unable to get PeerTube account info"); } - return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total)); } @Override - public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { - Response response = downloader.get(getUrl()); - if (null != response && null != response.responseBody()) { + public void onFetchPage(final Downloader downloader) throws IOException, ExtractionException { + final Response response = downloader.get(getUrl()); + if (response != null && response.responseBody() != null) { setInitialData(response.responseBody()); } else { - throw new ExtractionException("Unable to extract PeerTube channel data"); + throw new ExtractionException("Unable to extract PeerTube account data"); } - - String pageUrl = getUrl() + "/videos?" 
+ START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; - this.initPage = getPage(pageUrl); } - private void setInitialData(String responseBody) throws ExtractionException { + private void setInitialData(final String responseBody) throws ExtractionException { try { json = JsonParser.object().from(responseBody); } catch (JsonParserException e) { - throw new ExtractionException("Unable to extract PeerTube channel data", e); + throw new ExtractionException("Unable to extract PeerTube account data", e); } - if (json == null) throw new ExtractionException("Unable to extract PeerTube channel data"); + if (json == null) throw new ExtractionException("Unable to extract PeerTube account data"); } @Override @@ -170,5 +149,4 @@ public class PeertubeAccountExtractor extends ChannelExtractor { public String getOriginalUrl() throws ParsingException { return baseUrl + "/" + getId(); } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeChannelExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeChannelExtractor.java index 56d1caab5..e6cc66b6a 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeChannelExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeChannelExtractor.java @@ -1,10 +1,10 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; -import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelExtractor; import org.schabi.newpipe.extractor.downloader.Downloader; @@ -16,29 +16,28 @@ import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper; import 
org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; import org.schabi.newpipe.extractor.utils.JsonUtils; -import org.schabi.newpipe.extractor.utils.Parser; -import org.schabi.newpipe.extractor.utils.Parser.RegexException; import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.*; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.collectStreamsFrom; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; + public class PeertubeChannelExtractor extends ChannelExtractor { - - private InfoItemsPage initPage; - private long total; - private JsonObject json; private final String baseUrl; - public PeertubeChannelExtractor(StreamingService service, ListLinkHandler linkHandler) throws ParsingException { + public PeertubeChannelExtractor(final StreamingService service, final ListLinkHandler linkHandler) throws ParsingException { super(service, linkHandler); this.baseUrl = getBaseUrl(); } @Override - public String getAvatarUrl() throws ParsingException { + public String getAvatarUrl() { String value; try { value = JsonUtils.getString(json, "avatar.path"); @@ -49,7 +48,7 @@ public class PeertubeChannelExtractor extends ChannelExtractor { } @Override - public String getBannerUrl() throws ParsingException { + public String getBannerUrl() { return null; } @@ -59,13 +58,12 @@ public class PeertubeChannelExtractor extends ChannelExtractor { } @Override - public long getSubscriberCount() throws ParsingException { - Number number = JsonUtils.getNumber(json, "followersCount"); - 
return number.longValue(); + public long getSubscriberCount() { + return json.getLong("followersCount"); } @Override - public String getDescription() throws ParsingException { + public String getDescription() { try { return JsonUtils.getString(json, "description"); } catch (ParsingException e) { @@ -84,7 +82,7 @@ public class PeertubeChannelExtractor extends ChannelExtractor { } @Override - public String getParentChannelAvatarUrl() throws ParsingException { + public String getParentChannelAvatarUrl() { String value; try { value = JsonUtils.getString(json, "ownerAccount.avatar.path"); @@ -96,74 +94,55 @@ public class PeertubeChannelExtractor extends ChannelExtractor { @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - super.fetchPage(); - return initPage; - } - - private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonObject json, String pageUrl) throws ParsingException { - JsonArray contents; - try { - contents = (JsonArray) JsonUtils.getValue(json, "data"); - } catch (Exception e) { - throw new ParsingException("unable to extract channel streams", e); - } - - for (Object c : contents) { - if (c instanceof JsonObject) { - final JsonObject item = (JsonObject) c; - PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); - collector.commit(extractor); - } - } - + final String pageUrl = getUrl() + "/videos?" 
+ START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; + return getPage(new Page(pageUrl)); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - super.fetchPage(); - return initPage.getNextPageUrl(); - } + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } + + final Response response = getDownloader().get(page.getUrl()); - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Response response = getDownloader().get(pageUrl); JsonObject json = null; if (response != null && !Utils.isBlank(response.responseBody())) { try { json = JsonParser.object().from(response.responseBody()); } catch (Exception e) { - throw new ParsingException("Could not parse json data for kiosk info", e); + throw new ParsingException("Could not parse json data for channel info", e); } } - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); if (json != null) { PeertubeParsingHelper.validate(json); - this.total = JsonUtils.getNumber(json, "total").longValue(); - collectStreamsFrom(collector, json, pageUrl); + final long total = json.getLong("total"); + + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + collectStreamsFrom(collector, json, getBaseUrl()); + + return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total)); } else { - throw new ExtractionException("Unable to get PeerTube kiosk info"); + throw new ExtractionException("Unable to get PeerTube channel info"); } - return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total)); } @Override - public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { - Response response = downloader.get(getUrl()); - if (null != response && null != 
response.responseBody()) { + public void onFetchPage(final Downloader downloader) throws IOException, ExtractionException { + final Response response = downloader.get(getUrl()); + if (response != null && response.responseBody() != null) { setInitialData(response.responseBody()); } else { throw new ExtractionException("Unable to extract PeerTube channel data"); } - - this.initPage = getPage(getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE); } - private void setInitialData(String responseBody) throws ExtractionException { + private void setInitialData(final String responseBody) throws ExtractionException { try { json = JsonParser.object().from(responseBody); } catch (JsonParserException e) { - throw new ExtractionException("Unable to extract peertube channel data", e); + throw new ExtractionException("Unable to extract PeerTube channel data", e); } if (json == null) throw new ExtractionException("Unable to extract PeerTube channel data"); } @@ -177,5 +156,4 @@ public class PeertubeChannelExtractor extends ChannelExtractor { public String getOriginalUrl() throws ParsingException { return baseUrl + "/" + getId(); } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsExtractor.java index 08f682cf9..7c51e7404 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsExtractor.java @@ -4,6 +4,7 @@ import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.comments.CommentsExtractor; import 
org.schabi.newpipe.extractor.comments.CommentsInfoItem; @@ -15,56 +16,48 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper; import org.schabi.newpipe.extractor.utils.JsonUtils; -import org.schabi.newpipe.extractor.utils.Parser; -import org.schabi.newpipe.extractor.utils.Parser.RegexException; import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.*; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class PeertubeCommentsExtractor extends CommentsExtractor { - - private InfoItemsPage initPage; - private long total; - - public PeertubeCommentsExtractor(StreamingService service, ListLinkHandler uiHandler) { + public PeertubeCommentsExtractor(final StreamingService service, final ListLinkHandler uiHandler) { super(service, uiHandler); } @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - super.fetchPage(); - return initPage; + final String pageUrl = getUrl() + "?" 
+ START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; + return getPage(new Page(pageUrl)); } - private void collectStreamsFrom(CommentsInfoItemsCollector collector, JsonObject json, String pageUrl) throws ParsingException { - JsonArray contents; - try { - contents = (JsonArray) JsonUtils.getValue(json, "data"); - } catch (Exception e) { - throw new ParsingException("unable to extract comments info", e); - } + private void collectCommentsFrom(final CommentsInfoItemsCollector collector, final JsonObject json) throws ParsingException { + final JsonArray contents = json.getArray("data"); - for (Object c : contents) { + for (final Object c : contents) { if (c instanceof JsonObject) { final JsonObject item = (JsonObject) c; - PeertubeCommentsInfoItemExtractor extractor = new PeertubeCommentsInfoItemExtractor(item, this); - collector.commit(extractor); + if (!item.getBoolean("isDeleted")) { + final PeertubeCommentsInfoItemExtractor extractor = new PeertubeCommentsInfoItemExtractor(item, this); + collector.commit(extractor); + } } } - } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - super.fetchPage(); - return initPage.getNextPageUrl(); - } + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } + + final Response response = getDownloader().get(page.getUrl()); - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Response response = getDownloader().get(pageUrl); JsonObject json = null; if (response != null && !Utils.isBlank(response.responseBody())) { try { @@ -74,19 +67,19 @@ public class PeertubeCommentsExtractor extends CommentsExtractor { } } - CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); if (json != null) { - Number number = JsonUtils.getNumber(json, "total"); - if (number != 
null) this.total = number.longValue(); - collectStreamsFrom(collector, json, pageUrl); + PeertubeParsingHelper.validate(json); + final long total = json.getLong("total"); + + final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); + collectCommentsFrom(collector, json); + + return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total)); } else { - throw new ExtractionException("Unable to get peertube comments info"); + throw new ExtractionException("Unable to get PeerTube kiosk info"); } - return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total)); } @Override - public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { - this.initPage = getPage(getUrl() + "?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE); - } + public void onFetchPage(Downloader downloader) { } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsInfoItemExtractor.java index e1dfa3241..446a4e8e7 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeCommentsInfoItemExtractor.java @@ -1,6 +1,7 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; import com.grack.nanojson.JsonObject; + import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.schabi.newpipe.extractor.ServiceList; @@ -10,14 +11,14 @@ import org.schabi.newpipe.extractor.localization.DateWrapper; import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper; import org.schabi.newpipe.extractor.utils.JsonUtils; +import java.util.Objects; public class PeertubeCommentsInfoItemExtractor implements 
CommentsInfoItemExtractor { - private final JsonObject item; private final String url; private final String baseUrl; - public PeertubeCommentsInfoItemExtractor(JsonObject item, PeertubeCommentsExtractor extractor) throws ParsingException { + public PeertubeCommentsInfoItemExtractor(final JsonObject item, final PeertubeCommentsExtractor extractor) throws ParsingException { this.item = item; this.url = extractor.getUrl(); this.baseUrl = extractor.getBaseUrl(); @@ -29,7 +30,7 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac } @Override - public String getThumbnailUrl() throws ParsingException { + public String getThumbnailUrl() { String value; try { value = JsonUtils.getString(item, "account.avatar.path"); @@ -51,20 +52,20 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac @Override public DateWrapper getUploadDate() throws ParsingException { - String textualUploadDate = getTextualUploadDate(); + final String textualUploadDate = getTextualUploadDate(); return new DateWrapper(PeertubeParsingHelper.parseDateFrom(textualUploadDate)); } @Override - public int getLikeCount() throws ParsingException { + public int getLikeCount() { return -1; } @Override public String getCommentText() throws ParsingException { - String htmlText = JsonUtils.getString(item, "text"); + final String htmlText = JsonUtils.getString(item, "text"); try { - Document doc = Jsoup.parse(htmlText); + final Document doc = Jsoup.parse(htmlText); return doc.body().text(); } catch (Exception e) { return htmlText.replaceAll("(?s)<[^>]*>(\\s*<[^>]*>)*", ""); @@ -72,13 +73,12 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac } @Override - public String getCommentId() throws ParsingException { - Number value = JsonUtils.getNumber(item, "id"); - return value.toString(); + public String getCommentId() { + return Objects.toString(item.getLong("id"), null); } @Override - public String getUploaderAvatarUrl() throws 
ParsingException { + public String getUploaderAvatarUrl() { String value; try { value = JsonUtils.getString(item, "account.avatar.path"); @@ -95,9 +95,8 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac @Override public String getUploaderUrl() throws ParsingException { - String name = JsonUtils.getString(item, "account.name"); - String host = JsonUtils.getString(item, "account.host"); + final String name = JsonUtils.getString(item, "account.name"); + final String host = JsonUtils.getString(item, "account.host"); return ServiceList.PeerTube.getChannelLHFactory().fromId("accounts/" + name + "@" + host, baseUrl).getUrl(); } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubePlaylistExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubePlaylistExtractor.java index c689096ab..b22985369 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubePlaylistExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubePlaylistExtractor.java @@ -1,9 +1,10 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; -import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; + +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.downloader.Response; @@ -14,22 +15,22 @@ import org.schabi.newpipe.extractor.playlist.PlaylistExtractor; import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper; import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; -import org.schabi.newpipe.extractor.utils.JsonUtils; +import 
org.schabi.newpipe.extractor.utils.Utils; -import javax.annotation.Nonnull; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.*; +import javax.annotation.Nonnull; + +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.collectStreamsFrom; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class PeertubePlaylistExtractor extends PlaylistExtractor { - private JsonObject playlistInfo; - private JsonObject playlistVideos; - private String initialPageUrl; - private long total; - - public PeertubePlaylistExtractor(StreamingService service, ListLinkHandler linkHandler) { + public PeertubePlaylistExtractor(final StreamingService service, final ListLinkHandler linkHandler) { super(service, linkHandler); } @@ -39,17 +40,17 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor { } @Override - public String getBannerUrl() throws ParsingException { + public String getBannerUrl() { return null; } @Override - public String getUploaderUrl() throws ParsingException { + public String getUploaderUrl() { return playlistInfo.getObject("ownerAccount").getString("url"); } @Override - public String getUploaderName() throws ParsingException { + public String getUploaderName() { return playlistInfo.getObject("ownerAccount").getString("displayName"); } @@ -59,19 +60,19 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor { } @Override - public long getStreamCount() throws ParsingException { - return playlistInfo.getNumber("videosLength").longValue(); + public long getStreamCount() { + return playlistInfo.getLong("videosLength"); } @Nonnull @Override - public 
String getSubChannelName() throws ParsingException { + public String getSubChannelName() { return playlistInfo.getObject("videoChannel").getString("displayName"); } @Nonnull @Override - public String getSubChannelUrl() throws ParsingException { + public String getSubChannelUrl() { return playlistInfo.getObject("videoChannel").getString("url"); } @@ -84,47 +85,48 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor { @Nonnull @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - return getPage(initialPageUrl); + return getPage(new Page(getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE)); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - return PeertubeParsingHelper.getNextPageUrl(initialPageUrl, total); - } - - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Response response = getDownloader().get(pageUrl); - try { - playlistVideos = JsonParser.object().from(response.responseBody()); - } catch (JsonParserException jpe) { - throw new ExtractionException("Could not parse json", jpe); - } - PeertubeParsingHelper.validate(playlistVideos); - - this.total = JsonUtils.getNumber(playlistVideos, "total").longValue(); - - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - - JsonArray videos = playlistVideos.getArray("data"); - for (Object o : videos) { - JsonObject video = ((JsonObject) o).getObject("video"); - collector.commit(new PeertubeStreamInfoItemExtractor(video, getBaseUrl())); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } - return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total)); + final Response response = getDownloader().get(page.getUrl()); + + JsonObject json = 
null; + if (response != null && !Utils.isBlank(response.responseBody())) { + try { + json = JsonParser.object().from(response.responseBody()); + } catch (Exception e) { + throw new ParsingException("Could not parse json data for playlist info", e); + } + } + + if (json != null) { + PeertubeParsingHelper.validate(json); + final long total = json.getLong("total"); + + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + collectStreamsFrom(collector, json, getBaseUrl()); + + return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total)); + } else { + throw new ExtractionException("Unable to get PeerTube playlist info"); + } } @Override - public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException { - Response response = downloader.get(getUrl()); + public void onFetchPage(@Nonnull final Downloader downloader) throws IOException, ExtractionException { + final Response response = downloader.get(getUrl()); try { playlistInfo = JsonParser.object().from(response.responseBody()); } catch (JsonParserException jpe) { throw new ExtractionException("Could not parse json", jpe); } PeertubeParsingHelper.validate(playlistInfo); - initialPageUrl = getUrl() + "/videos?" 
+ START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; } @Nonnull diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSearchExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSearchExtractor.java index 0c8d4fef1..72fcbed2e 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSearchExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSearchExtractor.java @@ -1,12 +1,10 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; -import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import org.schabi.newpipe.extractor.InfoItem; -import org.schabi.newpipe.extractor.InfoItemExtractor; -import org.schabi.newpipe.extractor.InfoItemsCollector; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.downloader.Response; @@ -16,28 +14,26 @@ import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler; import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector; import org.schabi.newpipe.extractor.search.SearchExtractor; import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper; -import org.schabi.newpipe.extractor.utils.JsonUtils; -import org.schabi.newpipe.extractor.utils.Parser; -import org.schabi.newpipe.extractor.utils.Parser.RegexException; import org.schabi.newpipe.extractor.utils.Utils; -import javax.annotation.Nonnull; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.*; +import javax.annotation.Nonnull; + +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY; +import static 
org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.collectStreamsFrom; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class PeertubeSearchExtractor extends SearchExtractor { - - private InfoItemsPage initPage; - private long total; - public PeertubeSearchExtractor(StreamingService service, SearchQueryHandler linkHandler) { super(service, linkHandler); } @Nonnull @Override - public String getSearchSuggestion() throws ParsingException { + public String getSearchSuggestion() { return ""; } @@ -48,44 +44,20 @@ public class PeertubeSearchExtractor extends SearchExtractor { @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - super.fetchPage(); - return initPage; - } - - private InfoItemsCollector collectStreamsFrom(JsonObject json) throws ParsingException { - final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId()); - - JsonArray contents; - try { - contents = (JsonArray) JsonUtils.getValue(json, "data"); - } catch (Exception e) { - throw new ParsingException("unable to extract search info", e); - } - - String baseUrl = getBaseUrl(); - for (Object c : contents) { - if (c instanceof JsonObject) { - final JsonObject item = (JsonObject) c; - PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); - collector.commit(extractor); - } - } - - return collector; - + final String pageUrl = getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; + return getPage(new Page(pageUrl)); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - super.fetchPage(); - return initPage.getNextPageUrl(); - } + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + 
if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } + + final Response response = getDownloader().get(page.getUrl()); - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Response response = getDownloader().get(pageUrl); JsonObject json = null; - if (null != response && !Utils.isBlank(response.responseBody())) { + if (response != null && !Utils.isBlank(response.responseBody())) { try { json = JsonParser.object().from(response.responseBody()); } catch (Exception e) { @@ -94,15 +66,18 @@ public class PeertubeSearchExtractor extends SearchExtractor { } if (json != null) { - total = JsonUtils.getNumber(json, "total").longValue(); - return new InfoItemsPage<>(collectStreamsFrom(json), PeertubeParsingHelper.getNextPageUrl(pageUrl, total)); + PeertubeParsingHelper.validate(json); + final long total = json.getLong("total"); + + final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId()); + collectStreamsFrom(collector, json, getBaseUrl()); + + return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total)); } else { - throw new ExtractionException("Unable to get peertube search info"); + throw new ExtractionException("Unable to get PeerTube search info"); } } @Override - public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { - initPage = getPage(getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE); - } + public void onFetchPage(@Nonnull final Downloader downloader) throws IOException, ExtractionException { } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamExtractor.java index 72b5bea93..4fda78275 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamExtractor.java @@ -21,7 +21,6 @@ import org.schabi.newpipe.extractor.stream.AudioStream; import org.schabi.newpipe.extractor.stream.Description; import org.schabi.newpipe.extractor.stream.Stream; import org.schabi.newpipe.extractor.stream.StreamExtractor; -import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; import org.schabi.newpipe.extractor.stream.StreamType; import org.schabi.newpipe.extractor.stream.SubtitlesStream; @@ -40,13 +39,11 @@ import java.util.Locale; import javax.annotation.Nonnull; public class PeertubeStreamExtractor extends StreamExtractor { - - private final String baseUrl; private JsonObject json; private List subtitles = new ArrayList<>(); - public PeertubeStreamExtractor(StreamingService service, LinkHandler linkHandler) throws ParsingException { + public PeertubeStreamExtractor(final StreamingService service, final LinkHandler linkHandler) throws ParsingException { super(service, linkHandler); this.baseUrl = getBaseUrl(); } @@ -82,10 +79,10 @@ public class PeertubeStreamExtractor extends StreamExtractor { } if (text.length() == 250 && text.substring(247).equals("...")) { //if description is shortened, get full description - Downloader dl = NewPipe.getDownloader(); + final Downloader dl = NewPipe.getDownloader(); try { - Response response = dl.get(getUrl() + "/description"); - JsonObject jsonObject = JsonParser.object().from(response.responseBody()); + final Response response = dl.get(getUrl() + "/description"); + final JsonObject jsonObject = JsonParser.object().from(response.responseBody()); text = JsonUtils.getString(jsonObject, "description"); } catch (ReCaptchaException | IOException | JsonParserException e) { e.printStackTrace(); @@ -96,7 +93,7 @@ 
public class PeertubeStreamExtractor extends StreamExtractor { @Override public int getAgeLimit() throws ParsingException { - boolean isNSFW = JsonUtils.getBoolean(json, "nsfw"); + final boolean isNSFW = JsonUtils.getBoolean(json, "nsfw"); if (isNSFW) { return 18; } else { @@ -105,39 +102,35 @@ public class PeertubeStreamExtractor extends StreamExtractor { } @Override - public long getLength() throws ParsingException { - Number value = JsonUtils.getNumber(json, "duration"); - return value.longValue(); + public long getLength() { + return json.getLong("duration"); } @Override - public long getTimeStamp() throws ParsingException { + public long getTimeStamp() { //TODO fetch timestamp from url if present; return 0; } @Override - public long getViewCount() throws ParsingException { - Number value = JsonUtils.getNumber(json, "views"); - return value.longValue(); + public long getViewCount() { + return json.getLong("views"); } @Override - public long getLikeCount() throws ParsingException { - Number value = JsonUtils.getNumber(json, "likes"); - return value.longValue(); + public long getLikeCount() { + return json.getLong("likes"); } @Override - public long getDislikeCount() throws ParsingException { - Number value = JsonUtils.getNumber(json, "dislikes"); - return value.longValue(); + public long getDislikeCount() { + return json.getLong("dislikes"); } @Override public String getUploaderUrl() throws ParsingException { - String name = JsonUtils.getString(json, "account.name"); - String host = JsonUtils.getString(json, "account.host"); + final String name = JsonUtils.getString(json, "account.name"); + final String host = JsonUtils.getString(json, "account.host"); return getService().getChannelLHFactory().fromId("accounts/" + name + "@" + host, baseUrl).getUrl(); } @@ -147,7 +140,7 @@ public class PeertubeStreamExtractor extends StreamExtractor { } @Override - public String getUploaderAvatarUrl() throws ParsingException { + public String getUploaderAvatarUrl() { String 
value; try { value = JsonUtils.getString(json, "account.avatar.path"); @@ -170,7 +163,7 @@ public class PeertubeStreamExtractor extends StreamExtractor { @Nonnull @Override - public String getSubChannelAvatarUrl() throws ParsingException { + public String getSubChannelAvatarUrl() { String value; try { value = JsonUtils.getString(json, "channel.avatar.path"); @@ -181,35 +174,35 @@ public class PeertubeStreamExtractor extends StreamExtractor { } @Override - public String getDashMpdUrl() throws ParsingException { + public String getDashMpdUrl() { return ""; } @Override - public String getHlsUrl() throws ParsingException { + public String getHlsUrl() { return ""; } @Override - public List getAudioStreams() throws IOException, ExtractionException { + public List getAudioStreams() { return null; } @Override - public List getVideoStreams() throws IOException, ExtractionException { + public List getVideoStreams() throws ExtractionException { assertPageFetched(); - List videoStreams = new ArrayList<>(); + final List videoStreams = new ArrayList<>(); try { - JsonArray streams = json.getArray("files"); - for (Object s : streams) { + final JsonArray streams = json.getArray("files"); + for (final Object s : streams) { if (!(s instanceof JsonObject)) continue; - JsonObject stream = (JsonObject) s; - String url = JsonUtils.getString(stream, "fileUrl"); - String torrentUrl = JsonUtils.getString(stream, "torrentUrl"); - String resolution = JsonUtils.getString(stream, "resolution.label"); - String extension = url.substring(url.lastIndexOf(".") + 1); - MediaFormat format = MediaFormat.getFromSuffix(extension); - VideoStream videoStream = new VideoStream(url, torrentUrl, format, resolution); + final JsonObject stream = (JsonObject) s; + final String url = JsonUtils.getString(stream, "fileUrl"); + final String torrentUrl = JsonUtils.getString(stream, "torrentUrl"); + final String resolution = JsonUtils.getString(stream, "resolution.label"); + final String extension = 
url.substring(url.lastIndexOf(".") + 1); + final MediaFormat format = MediaFormat.getFromSuffix(extension); + final VideoStream videoStream = new VideoStream(url, torrentUrl, format, resolution); if (!Stream.containSimilarStream(videoStream, videoStreams)) { videoStreams.add(videoStream); } @@ -223,20 +216,19 @@ public class PeertubeStreamExtractor extends StreamExtractor { @Override - public List getVideoOnlyStreams() throws IOException, ExtractionException { - // TODO Auto-generated method stub - return null; + public List getVideoOnlyStreams() { + return Collections.emptyList(); } @Override - public List getSubtitlesDefault() throws IOException, ExtractionException { + public List getSubtitlesDefault() { return subtitles; } @Override - public List getSubtitles(final MediaFormat format) throws IOException, ExtractionException { - List filteredSubs = new ArrayList<>(); - for (SubtitlesStream sub : subtitles) { + public List getSubtitles(final MediaFormat format) { + final List filteredSubs = new ArrayList<>(); + for (final SubtitlesStream sub : subtitles) { if (sub.getFormat() == format) { filteredSubs.add(sub); } @@ -245,20 +237,15 @@ public class PeertubeStreamExtractor extends StreamExtractor { } @Override - public StreamType getStreamType() throws ParsingException { + public StreamType getStreamType() { return StreamType.VIDEO_STREAM; } - @Override - public StreamInfoItem getNextStream() throws IOException, ExtractionException { - return null; - } - @Override public StreamInfoItemsCollector getRelatedStreams() throws IOException, ExtractionException { - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - List tags = getTags(); - String apiUrl = null; + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + final List tags = getTags(); + final String apiUrl; if (!tags.isEmpty()) { apiUrl = getRelatedStreamsUrl(tags); @@ -280,7 +267,7 @@ public class PeertubeStreamExtractor extends 
StreamExtractor { @Nonnull @Override - public String getSupportInfo() throws ParsingException { + public String getSupportInfo() { try { return JsonUtils.getString(json, "support"); } catch (ParsingException e) { @@ -288,21 +275,21 @@ public class PeertubeStreamExtractor extends StreamExtractor { } } - private String getRelatedStreamsUrl(List tags) throws UnsupportedEncodingException { - String url = baseUrl + PeertubeSearchQueryHandlerFactory.SEARCH_ENDPOINT; - StringBuilder params = new StringBuilder(); + private String getRelatedStreamsUrl(final List tags) throws UnsupportedEncodingException { + final String url = baseUrl + PeertubeSearchQueryHandlerFactory.SEARCH_ENDPOINT; + final StringBuilder params = new StringBuilder(); params.append("start=0&count=8&sort=-createdAt"); - for (String tag : tags) { + for (final String tag : tags) { params.append("&tagsOneOf="); params.append(URLEncoder.encode(tag, "UTF-8")); } return url + "?" + params.toString(); } - private void getStreamsFromApi(StreamInfoItemsCollector collector, String apiUrl) throws ReCaptchaException, IOException, ParsingException { - Response response = getDownloader().get(apiUrl); + private void getStreamsFromApi(final StreamInfoItemsCollector collector, final String apiUrl) throws ReCaptchaException, IOException, ParsingException { + final Response response = getDownloader().get(apiUrl); JsonObject relatedVideosJson = null; - if (null != response && !Utils.isBlank(response.responseBody())) { + if (response != null && !Utils.isBlank(response.responseBody())) { try { relatedVideosJson = JsonParser.object().from(response.responseBody()); } catch (JsonParserException e) { @@ -315,66 +302,64 @@ public class PeertubeStreamExtractor extends StreamExtractor { } } - private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonObject json) throws ParsingException { - JsonArray contents; + private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonObject json) throws 
ParsingException { + final JsonArray contents; try { contents = (JsonArray) JsonUtils.getValue(json, "data"); } catch (Exception e) { throw new ParsingException("unable to extract related videos", e); } - for (Object c : contents) { + for (final Object c : contents) { if (c instanceof JsonObject) { final JsonObject item = (JsonObject) c; - PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); + final PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); //do not add the same stream in related streams if (!extractor.getUrl().equals(getUrl())) collector.commit(extractor); } } - } - @Override public String getErrorMessage() { return null; } @Override - public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { - Response response = downloader.get(getUrl()); - if (null != response && null != response.responseBody()) { + public void onFetchPage(final Downloader downloader) throws IOException, ExtractionException { + final Response response = downloader.get(getUrl()); + if (response != null && response.responseBody() != null) { setInitialData(response.responseBody()); } else { - throw new ExtractionException("Unable to extract peertube channel data"); + throw new ExtractionException("Unable to extract PeerTube channel data"); } loadSubtitles(); } - private void setInitialData(String responseBody) throws ExtractionException { + private void setInitialData(final String responseBody) throws ExtractionException { try { json = JsonParser.object().from(responseBody); } catch (JsonParserException e) { - throw new ExtractionException("Unable to extract peertube stream data", e); + throw new ExtractionException("Unable to extract PeerTube stream data", e); } - if (null == json) throw new ExtractionException("Unable to extract peertube stream data"); + if (json == null) throw new ExtractionException("Unable to extract PeerTube stream data"); 
PeertubeParsingHelper.validate(json); } private void loadSubtitles() { if (subtitles.isEmpty()) { try { - Response response = getDownloader().get(getUrl() + "/captions"); - JsonObject captionsJson = JsonParser.object().from(response.responseBody()); - JsonArray captions = JsonUtils.getArray(captionsJson, "data"); - for (Object c : captions) { + final Response response = getDownloader().get(getUrl() + "/captions"); + final JsonObject captionsJson = JsonParser.object().from(response.responseBody()); + final JsonArray captions = JsonUtils.getArray(captionsJson, "data"); + for (final Object c : captions) { if (c instanceof JsonObject) { - JsonObject caption = (JsonObject) c; - String url = baseUrl + JsonUtils.getString(caption, "captionPath"); - String languageCode = JsonUtils.getString(caption, "language.id"); - String ext = url.substring(url.lastIndexOf(".") + 1); - MediaFormat fmt = MediaFormat.getFromSuffix(ext); + final JsonObject caption = (JsonObject) c; + final String url = baseUrl + JsonUtils.getString(caption, "captionPath"); + final String languageCode = JsonUtils.getString(caption, "language.id"); + final String ext = url.substring(url.lastIndexOf(".") + 1); + final MediaFormat fmt = MediaFormat.getFromSuffix(ext); if (fmt != null && languageCode != null) subtitles.add(new SubtitlesStream(fmt, languageCode, url, false)); } @@ -416,7 +401,7 @@ public class PeertubeStreamExtractor extends StreamExtractor { } @Override - public Locale getLanguageInfo() throws ParsingException { + public Locale getLanguageInfo() { try { return new Locale(JsonUtils.getString(json, "language.id")); } catch (ParsingException e) { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamInfoItemExtractor.java index df8b8a609..edb72c164 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeStreamInfoItemExtractor.java @@ -1,6 +1,7 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; import com.grack.nanojson.JsonObject; + import org.schabi.newpipe.extractor.ServiceList; import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.localization.DateWrapper; @@ -10,24 +11,23 @@ import org.schabi.newpipe.extractor.stream.StreamType; import org.schabi.newpipe.extractor.utils.JsonUtils; public class PeertubeStreamInfoItemExtractor implements StreamInfoItemExtractor { - protected final JsonObject item; private final String baseUrl; - public PeertubeStreamInfoItemExtractor(JsonObject item, String baseUrl) { + public PeertubeStreamInfoItemExtractor(final JsonObject item, final String baseUrl) { this.item = item; this.baseUrl = baseUrl; } @Override public String getUrl() throws ParsingException { - String uuid = JsonUtils.getString(item, "uuid"); + final String uuid = JsonUtils.getString(item, "uuid"); return ServiceList.PeerTube.getStreamLHFactory().fromId(uuid, baseUrl).getUrl(); } @Override public String getThumbnailUrl() throws ParsingException { - String value = JsonUtils.getString(item, "thumbnailPath"); + final String value = JsonUtils.getString(item, "thumbnailPath"); return baseUrl + value; } @@ -37,20 +37,19 @@ public class PeertubeStreamInfoItemExtractor implements StreamInfoItemExtractor } @Override - public boolean isAd() throws ParsingException { + public boolean isAd() { return false; } @Override - public long getViewCount() throws ParsingException { - Number value = JsonUtils.getNumber(item, "views"); - return value.longValue(); + public long getViewCount() { + return item.getLong("views"); } @Override public String getUploaderUrl() throws ParsingException { - String name = 
JsonUtils.getString(item, "account.name"); - String host = JsonUtils.getString(item, "account.host"); + final String name = JsonUtils.getString(item, "account.name"); + final String host = JsonUtils.getString(item, "account.host"); return ServiceList.PeerTube.getChannelLHFactory().fromId("accounts/" + name + "@" + host, baseUrl).getUrl(); } @@ -77,14 +76,12 @@ public class PeertubeStreamInfoItemExtractor implements StreamInfoItemExtractor } @Override - public StreamType getStreamType() throws ParsingException { + public StreamType getStreamType() { return StreamType.VIDEO_STREAM; } @Override - public long getDuration() throws ParsingException { - Number value = JsonUtils.getNumber(item, "duration"); - return value.longValue(); + public long getDuration() { + return item.getLong("duration"); } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSubscriptionExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSubscriptionExtractor.java deleted file mode 100644 index 170629002..000000000 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSubscriptionExtractor.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.schabi.newpipe.extractor.services.peertube.extractors; - -import org.schabi.newpipe.extractor.StreamingService; -import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor; - -import java.util.List; - -public class PeertubeSubscriptionExtractor extends SubscriptionExtractor { - - public PeertubeSubscriptionExtractor(StreamingService service, List supportedSources) { - super(service, supportedSources); - // TODO Auto-generated constructor stub - } - - @Override - public String getRelatedUrl() { - // TODO Auto-generated method stub - return null; - } - -} diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSuggestionExtractor.java 
b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSuggestionExtractor.java index a028e77a1..afa060aca 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSuggestionExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeSuggestionExtractor.java @@ -1,22 +1,18 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; import org.schabi.newpipe.extractor.StreamingService; -import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor; -import java.io.IOException; import java.util.Collections; import java.util.List; public class PeertubeSuggestionExtractor extends SuggestionExtractor { - - public PeertubeSuggestionExtractor(StreamingService service) { + public PeertubeSuggestionExtractor(final StreamingService service) { super(service); } @Override - public List suggestionList(String query) throws IOException, ExtractionException { + public List suggestionList(final String query) { return Collections.emptyList(); } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeTrendingExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeTrendingExtractor.java index e6f0dc69d..fdc32aa19 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeTrendingExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/extractors/PeertubeTrendingExtractor.java @@ -1,9 +1,9 @@ package org.schabi.newpipe.extractor.services.peertube.extractors; -import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import 
org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.downloader.Response; @@ -14,19 +14,20 @@ import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper; import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; -import org.schabi.newpipe.extractor.utils.JsonUtils; import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.*; +import javax.annotation.Nonnull; + +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY; +import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.collectStreamsFrom; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class PeertubeTrendingExtractor extends KioskExtractor { - - private InfoItemsPage initPage; - private long total; - - public PeertubeTrendingExtractor(StreamingService streamingService, ListLinkHandler linkHandler, String kioskId) { + public PeertubeTrendingExtractor(final StreamingService streamingService, final ListLinkHandler linkHandler, final String kioskId) { super(streamingService, linkHandler, kioskId); } @@ -37,38 +38,18 @@ public class PeertubeTrendingExtractor extends KioskExtractor { @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - super.fetchPage(); - return initPage; - } - - private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonObject json, String pageUrl) throws ParsingException { - JsonArray contents; - try { - contents = (JsonArray) JsonUtils.getValue(json, "data"); - } catch 
(Exception e) { - throw new ParsingException("Unable to extract kiosk info", e); - } - - String baseUrl = getBaseUrl(); - for (Object c : contents) { - if (c instanceof JsonObject) { - final JsonObject item = (JsonObject) c; - PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl); - collector.commit(extractor); - } - } - + final String pageUrl = getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE; + return getPage(new Page(pageUrl)); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - super.fetchPage(); - return initPage.getNextPageUrl(); - } + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } + + final Response response = getDownloader().get(page.getUrl()); - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Response response = getDownloader().get(pageUrl); JsonObject json = null; if (response != null && !Utils.isBlank(response.responseBody())) { try { @@ -78,20 +59,19 @@ public class PeertubeTrendingExtractor extends KioskExtractor { } } - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); if (json != null) { - Number number = JsonUtils.getNumber(json, "total"); - if (number != null) this.total = number.longValue(); - collectStreamsFrom(collector, json, pageUrl); + PeertubeParsingHelper.validate(json); + final long total = json.getLong("total"); + + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + collectStreamsFrom(collector, json, getBaseUrl()); + + return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total)); } else { - throw new ExtractionException("Unable to get peertube kiosk info"); + throw new ExtractionException("Unable to get PeerTube kiosk 
info"); } - return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total)); } @Override - public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { - this.initPage = getPage(getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE); - } - + public void onFetchPage(@Nonnull final Downloader downloader) throws IOException, ExtractionException { } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/linkHandler/PeertubeStreamLinkHandlerFactory.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/linkHandler/PeertubeStreamLinkHandlerFactory.java index f181d3e71..d3f369fcc 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/linkHandler/PeertubeStreamLinkHandlerFactory.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/peertube/linkHandler/PeertubeStreamLinkHandlerFactory.java @@ -9,7 +9,7 @@ import org.schabi.newpipe.extractor.utils.Parser; public class PeertubeStreamLinkHandlerFactory extends LinkHandlerFactory { private static final PeertubeStreamLinkHandlerFactory instance = new PeertubeStreamLinkHandlerFactory(); - private static final String ID_PATTERN = "/videos/(watch/)?([^/?&#]*)"; + private static final String ID_PATTERN = "/videos/(watch/|embed/)?([^/?&#]*)"; private static final String VIDEO_ENDPOINT = "/api/v1/videos/"; private PeertubeStreamLinkHandlerFactory() { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudParsingHelper.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudParsingHelper.java index b8dafd834..29a625a90 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudParsingHelper.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudParsingHelper.java @@ -37,19 +37,21 @@ import static 
org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; import static org.schabi.newpipe.extractor.utils.Utils.replaceHttpWithHttps; public class SoundcloudParsingHelper { - private static final String HARDCODED_CLIENT_ID = "Uz4aPhG7GAl1VYGOnvOPW1wQ0M6xKtA9"; // Updated on 16/03/20 + private static final String HARDCODED_CLIENT_ID = "H2c34Q0E7hftqnuDHGsk88DbNqhYpgMm"; // Updated on 24/06/20 private static String clientId; private SoundcloudParsingHelper() { } - public static String clientId() throws ExtractionException, IOException { + public synchronized static String clientId() throws ExtractionException, IOException { if (!isNullOrEmpty(clientId)) return clientId; Downloader dl = NewPipe.getDownloader(); clientId = HARDCODED_CLIENT_ID; if (checkIfHardcodedClientIdIsValid()) { return clientId; + } else { + clientId = null; } final Response download = dl.get("https://soundcloud.com"); diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelExtractor.java index 295824259..351a9e38e 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelExtractor.java @@ -1,9 +1,10 @@ package org.schabi.newpipe.extractor.services.soundcloud.extractors; -import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; + +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelExtractor; import org.schabi.newpipe.extractor.downloader.Downloader; @@ -14,9 +15,10 @@ import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper; import 
org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; -import javax.annotation.Nonnull; import java.io.IOException; +import javax.annotation.Nonnull; + import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; @@ -25,21 +27,18 @@ public class SoundcloudChannelExtractor extends ChannelExtractor { private String userId; private JsonObject user; - private StreamInfoItemsCollector streamInfoItemsCollector = null; - private String nextPageUrl = null; - - public SoundcloudChannelExtractor(StreamingService service, ListLinkHandler linkHandler) { + public SoundcloudChannelExtractor(final StreamingService service, final ListLinkHandler linkHandler) { super(service, linkHandler); } @Override - public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException { + public void onFetchPage(@Nonnull final Downloader downloader) throws IOException, ExtractionException { userId = getLinkHandler().getId(); - String apiUrl = "https://api-v2.soundcloud.com/users/" + userId + + final String apiUrl = "https://api-v2.soundcloud.com/users/" + userId + "?client_id=" + SoundcloudParsingHelper.clientId(); - String response = downloader.get(apiUrl, getExtractorLocalization()).responseBody(); + final String response = downloader.get(apiUrl, getExtractorLocalization()).responseBody(); try { user = JsonParser.object().from(response); } catch (JsonParserException e) { @@ -76,7 +75,7 @@ public class SoundcloudChannelExtractor extends ChannelExtractor { @Override public long getSubscriberCount() { - return user.getNumber("followers_count", 0).longValue(); + return user.getLong("followers_count", 0); } @Override @@ -85,61 +84,48 @@ public class SoundcloudChannelExtractor extends ChannelExtractor { } @Override - public String getParentChannelName() throws ParsingException { + public String getParentChannelName() { return 
""; } @Override - public String getParentChannelUrl() throws ParsingException { + public String getParentChannelUrl() { return ""; } @Override - public String getParentChannelAvatarUrl() throws ParsingException { + public String getParentChannelAvatarUrl() { return ""; } @Nonnull @Override public InfoItemsPage getInitialPage() throws ExtractionException { - if (streamInfoItemsCollector == null) { - computeNextPageAndGetStreams(); - } - return new InfoItemsPage<>(streamInfoItemsCollector, getNextPageUrl()); - } - - @Override - public String getNextPageUrl() throws ExtractionException { - if (nextPageUrl == null) { - computeNextPageAndGetStreams(); - } - return nextPageUrl; - } - - private void computeNextPageAndGetStreams() throws ExtractionException { try { - streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId()); + final StreamInfoItemsCollector streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId()); - String apiUrl = "https://api-v2.soundcloud.com/users/" + getId() + "/tracks" + final String apiUrl = "https://api-v2.soundcloud.com/users/" + getId() + "/tracks" + "?client_id=" + SoundcloudParsingHelper.clientId() + "&limit=20" + "&linked_partitioning=1"; - nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, streamInfoItemsCollector, apiUrl); + final String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, streamInfoItemsCollector, apiUrl); + + return new InfoItemsPage<>(streamInfoItemsCollector, new Page(nextPageUrl)); } catch (Exception e) { throw new ExtractionException("Could not get next page", e); } } @Override - public InfoItemsPage getPage(final String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new 
IllegalArgumentException("Page doesn't contain an URL"); } - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl); + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + final String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, page.getUrl()); - return new InfoItemsPage<>(collector, nextPageUrl); + return new InfoItemsPage<>(collector, new Page(nextPageUrl)); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelInfoItemExtractor.java index 274448588..7fcc28a6c 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChannelInfoItemExtractor.java @@ -32,12 +32,12 @@ public class SoundcloudChannelInfoItemExtractor implements ChannelInfoItemExtrac @Override public long getSubscriberCount() { - return itemObject.getNumber("followers_count", 0).longValue(); + return itemObject.getLong("followers_count"); } @Override public long getStreamCount() { - return itemObject.getNumber("track_count", 0).longValue(); + return itemObject.getLong("track_count"); } @Override diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChartsExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChartsExtractor.java index 0c15d9c6b..e58dc9917 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChartsExtractor.java +++ 
b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudChartsExtractor.java @@ -1,5 +1,6 @@ package org.schabi.newpipe.extractor.services.soundcloud.extractors; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -9,16 +10,14 @@ import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper; import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; -import javax.annotation.Nonnull; import java.io.IOException; +import javax.annotation.Nonnull; + import static org.schabi.newpipe.extractor.ServiceList.SoundCloud; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class SoundcloudChartsExtractor extends KioskExtractor { - private StreamInfoItemsCollector collector = null; - private String nextPageUrl = null; - public SoundcloudChartsExtractor(StreamingService service, ListLinkHandler linkHandler, String kioskId) { @@ -36,20 +35,21 @@ public class SoundcloudChartsExtractor extends KioskExtractor { } @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, pageUrl, true); + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + final String nextPageUrl = 
SoundcloudParsingHelper.getStreamsFromApi(collector, page.getUrl(), true); - return new InfoItemsPage<>(collector, nextPageUrl); + return new InfoItemsPage<>(collector, new Page(nextPageUrl)); } - - private void computeNextPageAndStreams() throws IOException, ExtractionException { - collector = new StreamInfoItemsCollector(getServiceId()); + @Nonnull + @Override + public InfoItemsPage getInitialPage() throws IOException, ExtractionException { + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); String apiUrl = "https://api-v2.soundcloud.com/charts" + "?genre=soundcloud:genres:all-music" + @@ -61,27 +61,11 @@ public class SoundcloudChartsExtractor extends KioskExtractor { apiUrl += "&kind=trending"; } - - String contentCountry = SoundCloud.getContentCountry().getCountryCode(); + final String contentCountry = SoundCloud.getContentCountry().getCountryCode(); apiUrl += "&region=soundcloud:regions:" + contentCountry; - nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl, true); - } + final String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl, true); - @Override - public String getNextPageUrl() throws IOException, ExtractionException { - if (nextPageUrl == null) { - computeNextPageAndStreams(); - } - return nextPageUrl; - } - - @Nonnull - @Override - public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - if (collector == null) { - computeNextPageAndStreams(); - } - return new InfoItemsPage<>(collector, getNextPageUrl()); + return new InfoItemsPage<>(collector, new Page(nextPageUrl)); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsExtractor.java index 71c6469d8..f12559c3c 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsExtractor.java @@ -4,7 +4,9 @@ import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; + import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.comments.CommentsExtractor; import org.schabi.newpipe.extractor.comments.CommentsInfoItem; @@ -15,58 +17,63 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; -import javax.annotation.Nonnull; import java.io.IOException; +import javax.annotation.Nonnull; + +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; + public class SoundcloudCommentsExtractor extends CommentsExtractor { - - private JsonObject json; - - public SoundcloudCommentsExtractor(StreamingService service, ListLinkHandler uiHandler) { + public SoundcloudCommentsExtractor(final StreamingService service, final ListLinkHandler uiHandler) { super(service, uiHandler); } @Nonnull @Override - public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); + public InfoItemsPage getInitialPage() throws ExtractionException, IOException { + final Downloader downloader = NewPipe.getDownloader(); + final Response response = downloader.get(getUrl()); - collectStreamsFrom(collector, json.getArray("collection")); - - return new InfoItemsPage<>(collector, getNextPageUrl()); - } - - @Override - public String getNextPageUrl() throws IOException, 
ExtractionException { - return json.getString("next_href"); - } - - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - Downloader dl = NewPipe.getDownloader(); - Response rp = dl.get(pageUrl); - try { - json = JsonParser.object().from(rp.responseBody()); - } catch (JsonParserException e) { - throw new ParsingException("Could not parse json", e); - } - - final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); - collectStreamsFrom(collector, json.getArray("collection")); - - return new InfoItemsPage<>(collector, getNextPageUrl()); - } - - @Override - public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException { - Response response = downloader.get(getUrl()); + final JsonObject json; try { json = JsonParser.object().from(response.responseBody()); } catch (JsonParserException e) { throw new ParsingException("Could not parse json", e); } + + final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); + + collectStreamsFrom(collector, json.getArray("collection")); + + return new InfoItemsPage<>(collector, new Page(json.getString("next_href"))); } + @Override + public InfoItemsPage getPage(final Page page) throws ExtractionException, IOException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } + + final Downloader downloader = NewPipe.getDownloader(); + final Response response = downloader.get(page.getUrl()); + + final JsonObject json; + try { + json = JsonParser.object().from(response.responseBody()); + } catch (JsonParserException e) { + throw new ParsingException("Could not parse json", e); + } + + final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); + + collectStreamsFrom(collector, json.getArray("collection")); + + return new InfoItemsPage<>(collector, new Page(json.getString("next_href"))); + } 
+ + @Override + public void onFetchPage(@Nonnull final Downloader downloader) { } + private void collectStreamsFrom(final CommentsInfoItemsCollector collector, final JsonArray entries) throws ParsingException { final String url = getUrl(); for (Object comment : entries) { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsInfoItemExtractor.java index 8a478dffd..7969904eb 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudCommentsInfoItemExtractor.java @@ -6,10 +6,11 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.localization.DateWrapper; import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper; +import java.util.Objects; + import javax.annotation.Nullable; public class SoundcloudCommentsInfoItemExtractor implements CommentsInfoItemExtractor { - private JsonObject json; private String url; @@ -19,32 +20,32 @@ public class SoundcloudCommentsInfoItemExtractor implements CommentsInfoItemExtr } @Override - public String getCommentId() throws ParsingException { - return json.getNumber("id").toString(); + public String getCommentId() { + return Objects.toString(json.getLong("id"), null); } @Override - public String getCommentText() throws ParsingException { + public String getCommentText() { return json.getString("body"); } @Override - public String getUploaderName() throws ParsingException { + public String getUploaderName() { return json.getObject("user").getString("username"); } @Override - public String getUploaderAvatarUrl() throws ParsingException { + public String getUploaderAvatarUrl() { return 
json.getObject("user").getString("avatar_url"); } @Override - public String getUploaderUrl() throws ParsingException { + public String getUploaderUrl() { return json.getObject("user").getString("permalink_url"); } @Override - public String getTextualUploadDate() throws ParsingException { + public String getTextualUploadDate() { return json.getString("created_at"); } @@ -55,7 +56,7 @@ public class SoundcloudCommentsInfoItemExtractor implements CommentsInfoItemExtr } @Override - public int getLikeCount() throws ParsingException { + public int getLikeCount() { return -1; } @@ -70,7 +71,7 @@ public class SoundcloudCommentsInfoItemExtractor implements CommentsInfoItemExtr } @Override - public String getThumbnailUrl() throws ParsingException { + public String getThumbnailUrl() { return json.getObject("user").getString("avatar_url"); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistExtractor.java index 484c08fd4..878a7766c 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistExtractor.java @@ -6,6 +6,7 @@ import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -15,24 +16,23 @@ import org.schabi.newpipe.extractor.playlist.PlaylistExtractor; import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper; import org.schabi.newpipe.extractor.stream.StreamInfoItem; import 
org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; +import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import javax.annotation.Nonnull; import javax.annotation.Nullable; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; -@SuppressWarnings("WeakerAccess") public class SoundcloudPlaylistExtractor extends PlaylistExtractor { - private static final int streamsPerRequestedPage = 15; + private static final int STREAMS_PER_REQUESTED_PAGE = 15; private String playlistId; private JsonObject playlist; - private StreamInfoItemsCollector streamInfoItemsCollector; - private String nextPageUrl; - public SoundcloudPlaylistExtractor(StreamingService service, ListLinkHandler linkHandler) { super(service, linkHandler); } @@ -113,98 +113,73 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor { @Override public long getStreamCount() { - return playlist.getNumber("track_count", 0).longValue(); + return playlist.getLong("track_count"); } @Nonnull @Override - public String getSubChannelName() throws ParsingException { + public String getSubChannelName() { return ""; } @Nonnull @Override - public String getSubChannelUrl() throws ParsingException { + public String getSubChannelUrl() { return ""; } @Nonnull @Override - public String getSubChannelAvatarUrl() throws ParsingException { + public String getSubChannelAvatarUrl() { return ""; } - @Nonnull - @Override - public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - if (streamInfoItemsCollector == null) { - computeInitialTracksAndNextPageUrl(); - } - return new InfoItemsPage<>(streamInfoItemsCollector, nextPageUrl); - } + public InfoItemsPage getInitialPage() { + final StreamInfoItemsCollector streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId()); + final List ids = new ArrayList<>(); - private void computeInitialTracksAndNextPageUrl() throws IOException, 
ExtractionException { - streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId()); - StringBuilder nextPageUrlBuilder = new StringBuilder("https://api-v2.soundcloud.com/tracks?client_id="); - nextPageUrlBuilder.append(SoundcloudParsingHelper.clientId()); - nextPageUrlBuilder.append("&ids="); - - JsonArray tracks = playlist.getArray("tracks"); + final JsonArray tracks = playlist.getArray("tracks"); for (Object o : tracks) { if (o instanceof JsonObject) { - JsonObject track = (JsonObject) o; + final JsonObject track = (JsonObject) o; if (track.has("title")) { // i.e. if full info is available streamInfoItemsCollector.commit(new SoundcloudStreamInfoItemExtractor(track)); } else { // %09d would be enough, but a 0 before the number does not create problems, so let's be sure - nextPageUrlBuilder.append(String.format("%010d,", track.getInt("id"))); + ids.add(String.format("%010d", track.getInt("id"))); } } } - nextPageUrlBuilder.setLength(nextPageUrlBuilder.length() - 1); // remove trailing , - nextPageUrl = nextPageUrlBuilder.toString(); - if (nextPageUrl.endsWith("&ids")) { - // there are no other videos - nextPageUrl = ""; - } + return new InfoItemsPage<>(streamInfoItemsCollector, new Page(ids)); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - if (nextPageUrl == null) { - computeInitialTracksAndNextPageUrl(); - } - return nextPageUrl; - } - - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getIds())) { + throw new IllegalArgumentException("Page doesn't contain IDs"); } - // see computeInitialTracksAndNextPageUrl - final int lengthFirstPartOfUrl = ("https://api-v2.soundcloud.com/tracks?client_id=" - + 
SoundcloudParsingHelper.clientId() - + "&ids=").length(); - final int lengthOfEveryStream = 11; - - String currentPageUrl, nextUrl; - int lengthMaxStreams = lengthFirstPartOfUrl + lengthOfEveryStream * streamsPerRequestedPage; - if (pageUrl.length() <= lengthMaxStreams) { - currentPageUrl = pageUrl; // fetch every remaining video, there are less than the max - nextUrl = ""; // afterwards the list is complete + final List currentIds; + final List nextIds; + if (page.getIds().size() <= STREAMS_PER_REQUESTED_PAGE) { + // Fetch every remaining stream, there are less than the max + currentIds = page.getIds(); + nextIds = null; } else { - currentPageUrl = pageUrl.substring(0, lengthMaxStreams); - nextUrl = pageUrl.substring(0, lengthFirstPartOfUrl) + pageUrl.substring(lengthMaxStreams); + currentIds = page.getIds().subList(0, STREAMS_PER_REQUESTED_PAGE); + nextIds = page.getIds().subList(STREAMS_PER_REQUESTED_PAGE, page.getIds().size()); } - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - String response = NewPipe.getDownloader().get(currentPageUrl, getExtractorLocalization()).responseBody(); + final String currentPageUrl = "https://api-v2.soundcloud.com/tracks?client_id=" + + SoundcloudParsingHelper.clientId() + + "&ids=" + Utils.join(",", currentIds); + + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + final String response = NewPipe.getDownloader().get(currentPageUrl, getExtractorLocalization()).responseBody(); try { - JsonArray tracks = JsonParser.array().from(response); + final JsonArray tracks = JsonParser.array().from(response); for (Object track : tracks) { if (track instanceof JsonObject) { collector.commit(new SoundcloudStreamInfoItemExtractor((JsonObject) track)); @@ -214,6 +189,6 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor { throw new ParsingException("Could not parse json response", e); } - return new InfoItemsPage<>(collector, nextUrl); + return new 
InfoItemsPage<>(collector, new Page(nextIds)); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistInfoItemExtractor.java index f29efb1cc..7c4631258 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudPlaylistInfoItemExtractor.java @@ -81,6 +81,6 @@ public class SoundcloudPlaylistInfoItemExtractor implements PlaylistInfoItemExtr @Override public long getStreamCount() { - return itemObject.getNumber("track_count", 0).longValue(); + return itemObject.getLong("track_count"); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudSearchExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudSearchExtractor.java index 24f5987ec..93a5e131a 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudSearchExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudSearchExtractor.java @@ -4,9 +4,11 @@ import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; import com.grack.nanojson.JsonParserException; + import org.schabi.newpipe.extractor.InfoItem; import org.schabi.newpipe.extractor.InfoItemExtractor; import org.schabi.newpipe.extractor.InfoItemsCollector; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -16,17 +18,18 @@ import 
org.schabi.newpipe.extractor.search.InfoItemsSearchCollector; import org.schabi.newpipe.extractor.search.SearchExtractor; import org.schabi.newpipe.extractor.utils.Parser; -import javax.annotation.Nonnull; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; +import javax.annotation.Nonnull; + import static org.schabi.newpipe.extractor.services.soundcloud.linkHandler.SoundcloudSearchQueryHandlerFactory.ITEMS_PER_PAGE; import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING; +import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; public class SoundcloudSearchExtractor extends SearchExtractor { - private JsonArray searchCollection; public SoundcloudSearchExtractor(StreamingService service, SearchQueryHandler linkHandler) { @@ -47,25 +50,24 @@ public class SoundcloudSearchExtractor extends SearchExtractor { @Nonnull @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - return new InfoItemsPage<>(collectItems(searchCollection), getNextPageUrl()); + return new InfoItemsPage<>(collectItems(searchCollection), getNextPageFromCurrentUrl(getUrl())); } @Override - public String getNextPageUrl() throws IOException, ExtractionException { - return getNextPageUrlFromCurrentUrl(getUrl()); - } + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); + } - @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { final Downloader dl = getDownloader(); try { - final String response = dl.get(pageUrl, getExtractorLocalization()).responseBody(); + final String response = dl.get(page.getUrl(), getExtractorLocalization()).responseBody(); searchCollection = JsonParser.object().from(response).getArray("collection"); } catch (JsonParserException e) { 
throw new ParsingException("Could not parse json response", e); } - return new InfoItemsPage<>(collectItems(searchCollection), getNextPageUrlFromCurrentUrl(pageUrl)); + return new InfoItemsPage<>(collectItems(searchCollection), getNextPageFromCurrentUrl(page.getUrl())); } @Override @@ -108,7 +110,7 @@ public class SoundcloudSearchExtractor extends SearchExtractor { return collector; } - private String getNextPageUrlFromCurrentUrl(String currentUrl) + private Page getNextPageFromCurrentUrl(String currentUrl) throws MalformedURLException, UnsupportedEncodingException { final int pageOffset = Integer.parseInt( Parser.compatParseMap( @@ -116,8 +118,7 @@ public class SoundcloudSearchExtractor extends SearchExtractor { .getQuery()) .get("offset")); - return currentUrl.replace("&offset=" + - Integer.toString(pageOffset), - "&offset=" + Integer.toString(pageOffset + ITEMS_PER_PAGE)); + return new Page(currentUrl.replace("&offset=" + pageOffset, + "&offset=" + (pageOffset + ITEMS_PER_PAGE))); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamExtractor.java index 431baff94..6aee297d2 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamExtractor.java @@ -19,7 +19,6 @@ import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper; import org.schabi.newpipe.extractor.stream.AudioStream; import org.schabi.newpipe.extractor.stream.Description; import org.schabi.newpipe.extractor.stream.StreamExtractor; -import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; import org.schabi.newpipe.extractor.stream.StreamType; import 
org.schabi.newpipe.extractor.stream.SubtitlesStream; @@ -102,7 +101,7 @@ public class SoundcloudStreamExtractor extends StreamExtractor { @Override public long getLength() { - return track.getNumber("duration", 0).longValue() / 1000L; + return track.getLong("duration") / 1000L; } @Override @@ -112,12 +111,12 @@ public class SoundcloudStreamExtractor extends StreamExtractor { @Override public long getViewCount() { - return track.getNumber("playback_count", 0).longValue(); + return track.getLong("playback_count"); } @Override public long getLikeCount() { - return track.getNumber("favoritings_count", -1).longValue(); + return track.getLong("favoritings_count", -1); } @Override @@ -261,11 +260,6 @@ public class SoundcloudStreamExtractor extends StreamExtractor { return StreamType.AUDIO_STREAM; } - @Override - public StreamInfoItem getNextStream() throws IOException, ExtractionException { - return null; - } - @Override public StreamInfoItemsCollector getRelatedStreams() throws IOException, ExtractionException { StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamInfoItemExtractor.java index 3aef17ff7..7da3ab5d1 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/extractors/SoundcloudStreamInfoItemExtractor.java @@ -30,7 +30,7 @@ public class SoundcloudStreamInfoItemExtractor implements StreamInfoItemExtracto @Override public long getDuration() { - return itemObject.getNumber("duration", 0).longValue() / 1000L; + return itemObject.getLong("duration") / 1000L; } @Override @@ -53,13 +53,9 @@ public class SoundcloudStreamInfoItemExtractor implements 
StreamInfoItemExtracto return new DateWrapper(SoundcloudParsingHelper.parseDateFrom(getTextualUploadDate())); } - private String getCreatedAt() { - return itemObject.getString("created_at"); - } - @Override public long getViewCount() { - return itemObject.getNumber("playback_count", 0).longValue(); + return itemObject.getLong("playback_count"); } @Override diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelper.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelper.java index 6df9b4b87..e124d0208 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelper.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelper.java @@ -64,6 +64,8 @@ public class YoutubeParsingHelper { private static final String HARDCODED_CLIENT_VERSION = "2.20200214.04.00"; private static String clientVersion; + private static String key; + private static final String[] HARDCODED_YOUTUBE_MUSIC_KEYS = {"AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30", "67", "0.1"}; private static String[] youtubeMusicKeys; @@ -105,10 +107,31 @@ public class YoutubeParsingHelper { public static boolean isInvidioURL(URL url) { String host = url.getHost(); - return host.equalsIgnoreCase("invidio.us") || host.equalsIgnoreCase("dev.invidio.us") || host.equalsIgnoreCase("www.invidio.us") || host.equalsIgnoreCase("invidious.snopyta.org") || host.equalsIgnoreCase("de.invidious.snopyta.org") || host.equalsIgnoreCase("fi.invidious.snopyta.org") || host.equalsIgnoreCase("vid.wxzm.sx") || host.equalsIgnoreCase("invidious.kabi.tk") || host.equalsIgnoreCase("invidiou.sh") || host.equalsIgnoreCase("www.invidiou.sh") || host.equalsIgnoreCase("no.invidiou.sh") || host.equalsIgnoreCase("invidious.enkirton.net") || host.equalsIgnoreCase("tube.poal.co") || host.equalsIgnoreCase("invidious.13ad.de") || host.equalsIgnoreCase("yt.elukerio.org"); + return 
host.equalsIgnoreCase("invidio.us") + || host.equalsIgnoreCase("dev.invidio.us") + || host.equalsIgnoreCase("www.invidio.us") + || host.equalsIgnoreCase("invidious.snopyta.org") + || host.equalsIgnoreCase("fi.invidious.snopyta.org") + || host.equalsIgnoreCase("yewtu.be") + || host.equalsIgnoreCase("invidious.ggc-project.de") + || host.equalsIgnoreCase("yt.maisputain.ovh") + || host.equalsIgnoreCase("invidious.13ad.de") + || host.equalsIgnoreCase("invidious.toot.koeln") + || host.equalsIgnoreCase("invidious.fdn.fr") + || host.equalsIgnoreCase("watch.nettohikari.com") + || host.equalsIgnoreCase("invidious.snwmds.net") + || host.equalsIgnoreCase("invidious.snwmds.org") + || host.equalsIgnoreCase("invidious.snwmds.com") + || host.equalsIgnoreCase("invidious.sunsetravens.com") + || host.equalsIgnoreCase("invidious.gachirangers.com"); } - public static long parseDurationString(String input) + /** + * Parses the duration string of the video expecting ":" or "." as separators + * @return the duration in seconds + * @throws ParsingException when more than 3 separators are found + */ + public static int parseDurationString(final String input) throws ParsingException, NumberFormatException { // If time separator : is not detected, try . 
instead final String[] splitInput = input.contains(":") @@ -142,10 +165,11 @@ public class YoutubeParsingHelper { default: throw new ParsingException("Error duration string with unknown format: " + input); } - return ((((Long.parseLong(Utils.removeNonDigitCharacters(days)) * 24) - + Long.parseLong(Utils.removeNonDigitCharacters(hours)) * 60) - + Long.parseLong(Utils.removeNonDigitCharacters(minutes))) * 60) - + Long.parseLong(Utils.removeNonDigitCharacters(seconds)); + + return ((Integer.parseInt(Utils.removeNonDigitCharacters(days)) * 24 + + Integer.parseInt(Utils.removeNonDigitCharacters(hours))) * 60 + + Integer.parseInt(Utils.removeNonDigitCharacters(minutes))) * 60 + + Integer.parseInt(Utils.removeNonDigitCharacters(seconds)); } public static String getFeedUrlFrom(final String channelIdOrUser) { @@ -192,39 +216,31 @@ public class YoutubeParsingHelper { return response.length() > 50; // ensure to have a valid response } - /** - * Get the client version from a page - * @return - * @throws ParsingException - */ - public static String getClientVersion() throws IOException, ExtractionException { - if (!isNullOrEmpty(clientVersion)) return clientVersion; - if (isHardcodedClientVersionValid()) return clientVersion = HARDCODED_CLIENT_VERSION; - + private static void extractClientVersionAndKey() throws IOException, ExtractionException { final String url = "https://www.youtube.com/results?search_query=test"; final String html = getDownloader().get(url).responseBody(); - JsonObject initialData = getInitialData(html); - JsonArray serviceTrackingParams = initialData.getObject("responseContext").getArray("serviceTrackingParams"); + final JsonObject initialData = getInitialData(html); + final JsonArray serviceTrackingParams = initialData.getObject("responseContext").getArray("serviceTrackingParams"); String shortClientVersion = null; // try to get version from initial data first - for (Object service : serviceTrackingParams) { - JsonObject s = (JsonObject) service; + for 
(final Object service : serviceTrackingParams) { + final JsonObject s = (JsonObject) service; if (s.getString("service").equals("CSI")) { - JsonArray params = s.getArray("params"); - for (Object param : params) { - JsonObject p = (JsonObject) param; - String key = p.getString("key"); + final JsonArray params = s.getArray("params"); + for (final Object param : params) { + final JsonObject p = (JsonObject) param; + final String key = p.getString("key"); if (key != null && key.equals("cver")) { - return clientVersion = p.getString("value"); + clientVersion = p.getString("value"); } } } else if (s.getString("service").equals("ECATCHER")) { // fallback to get a shortened client version which does not contain the last two digits - JsonArray params = s.getArray("params"); - for (Object param : params) { - JsonObject p = (JsonObject) param; - String key = p.getString("key"); + final JsonArray params = s.getArray("params"); + for (final Object param : params) { + final JsonObject p = (JsonObject) param; + final String key = p.getString("key"); if (key != null && key.equals("client.version")) { shortClientVersion = p.getString("value"); } @@ -233,26 +249,55 @@ public class YoutubeParsingHelper { } String contextClientVersion; - String[] patterns = { + final String[] patterns = { "INNERTUBE_CONTEXT_CLIENT_VERSION\":\"([0-9\\.]+?)\"", "innertube_context_client_version\":\"([0-9\\.]+?)\"", "client.version=([0-9\\.]+)" }; - for (String pattern : patterns) { + for (final String pattern : patterns) { try { contextClientVersion = Parser.matchGroup1(pattern, html); if (!isNullOrEmpty(contextClientVersion)) { - return clientVersion = contextClientVersion; + clientVersion = contextClientVersion; + break; } - } catch (Exception ignored) { - } + } catch (Parser.RegexException ignored) { } } - if (shortClientVersion != null) { - return clientVersion = shortClientVersion; + if (!isNullOrEmpty(clientVersion) && !isNullOrEmpty(shortClientVersion)) { + clientVersion = shortClientVersion; } - 
throw new ParsingException("Could not get client version"); + try { + key = Parser.matchGroup1("INNERTUBE_API_KEY\":\"([0-9a-zA-Z_-]+?)\"", html); + } catch (Parser.RegexException e) { + try { + key = Parser.matchGroup1("innertubeApiKey\":\"([0-9a-zA-Z_-]+?)\"", html); + } catch (Parser.RegexException ignored) { } + } + } + + /** + * Get the client version + */ + public static String getClientVersion() throws IOException, ExtractionException { + if (!isNullOrEmpty(clientVersion)) return clientVersion; + if (isHardcodedClientVersionValid()) return clientVersion = HARDCODED_CLIENT_VERSION; + + extractClientVersionAndKey(); + if (isNullOrEmpty(key)) throw new ParsingException("Could not extract client version"); + return clientVersion; + } + + /** + * Get the key + */ + public static String getKey() throws IOException, ExtractionException { + if (!isNullOrEmpty(key)) return key; + + extractClientVersionAndKey(); + if (isNullOrEmpty(key)) throw new ParsingException("Could not extract key"); + return key; } public static boolean areHardcodedYoutubeMusicKeysValid() throws IOException, ReCaptchaException { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeChannelExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeChannelExtractor.java index f5cfbc243..45d2ac362 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeChannelExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeChannelExtractor.java @@ -2,6 +2,8 @@ package org.schabi.newpipe.extractor.services.youtube.extractors; import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; + +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelExtractor; import org.schabi.newpipe.extractor.downloader.Downloader; @@ 
-16,10 +18,13 @@ import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; import org.schabi.newpipe.extractor.utils.Utils; -import javax.annotation.Nonnull; import java.io.IOException; -import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.*; +import javax.annotation.Nonnull; + +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.fixThumbnailUrl; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getJsonResponse; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getTextFromObject; import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; @@ -104,15 +109,6 @@ public class YoutubeChannelExtractor extends ChannelExtractor { YoutubeParsingHelper.defaultAlertsCheck(initialData); } - - @Override - public String getNextPageUrl() throws ExtractionException { - if (getVideoTab() == null) return ""; - return getNextPageUrlFrom(getVideoTab().getObject("content").getObject("sectionListRenderer") - .getArray("contents").getObject(0).getObject("itemSectionRenderer") - .getArray("contents").getObject(0).getObject("gridRenderer").getArray("continuations")); - } - @Nonnull @Override public String getUrl() throws ParsingException { @@ -231,22 +227,27 @@ public class YoutubeChannelExtractor extends ChannelExtractor { @Nonnull @Override public InfoItemsPage getInitialPage() throws ExtractionException { - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + + Page nextPage = null; if (getVideoTab() != null) { - JsonArray videos = getVideoTab().getObject("content").getObject("sectionListRenderer").getArray("contents") - 
.getObject(0).getObject("itemSectionRenderer").getArray("contents").getObject(0) - .getObject("gridRenderer").getArray("items"); - collectStreamsFrom(collector, videos); + final JsonObject gridRenderer = getVideoTab().getObject("content").getObject("sectionListRenderer") + .getArray("contents").getObject(0).getObject("itemSectionRenderer") + .getArray("contents").getObject(0).getObject("gridRenderer"); + + collectStreamsFrom(collector, gridRenderer.getArray("items")); + + nextPage = getNextPageFrom(gridRenderer.getArray("continuations")); } - return new InfoItemsPage<>(collector, getNextPageUrl()); + return new InfoItemsPage<>(collector, nextPage); } @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } // Unfortunately, we have to fetch the page even if we are only getting next streams, @@ -254,27 +255,26 @@ public class YoutubeChannelExtractor extends ChannelExtractor { fetchPage(); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization()); + final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization()); JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response") .getObject("continuationContents").getObject("gridContinuation"); collectStreamsFrom(collector, sectionListContinuation.getArray("items")); - return new InfoItemsPage<>(collector, getNextPageUrlFrom(sectionListContinuation.getArray("continuations"))); + return new InfoItemsPage<>(collector, getNextPageFrom(sectionListContinuation.getArray("continuations"))); } - - private String 
getNextPageUrlFrom(JsonArray continuations) { + private Page getNextPageFrom(final JsonArray continuations) { if (isNullOrEmpty(continuations)) { - return ""; + return null; } - JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData"); - String continuation = nextContinuationData.getString("continuation"); - String clickTrackingParams = nextContinuationData.getString("clickTrackingParams"); - return "https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation - + "&itct=" + clickTrackingParams; + final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData"); + final String continuation = nextContinuationData.getString("continuation"); + final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams"); + return new Page("https://www.youtube.com/browse_ajax?ctoken=" + continuation + + "&continuation=" + continuation + "&itct=" + clickTrackingParams); } private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonArray videos) throws ParsingException { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsExtractor.java index 1dea2952b..287e74212 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsExtractor.java @@ -3,6 +3,8 @@ package org.schabi.newpipe.extractor.services.youtube.extractors; import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; import com.grack.nanojson.JsonParser; + +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.comments.CommentsExtractor; import 
org.schabi.newpipe.extractor.comments.CommentsInfoItem; @@ -17,7 +19,6 @@ import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler; import org.schabi.newpipe.extractor.utils.JsonUtils; import org.schabi.newpipe.extractor.utils.Parser; -import javax.annotation.Nonnull; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; @@ -26,19 +27,19 @@ import java.util.List; import java.util.Map; import java.util.regex.Pattern; +import javax.annotation.Nonnull; + import static java.util.Collections.singletonList; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; - public class YoutubeCommentsExtractor extends CommentsExtractor { - // using the mobile site for comments because it loads faster and uses get requests instead of post private static final String USER_AGENT = "Mozilla/5.0 (Android 8.1.0; Mobile; rv:62.0) Gecko/62.0 Firefox/62.0"; private static final Pattern YT_CLIENT_NAME_PATTERN = Pattern.compile("INNERTUBE_CONTEXT_CLIENT_NAME\\\":(.*?)[,}]"); private String ytClientVersion; private String ytClientName; - private InfoItemsPage initPage; + private String responseBody; public YoutubeCommentsExtractor(StreamingService service, ListLinkHandler uiHandler) { super(service, uiHandler); @@ -46,56 +47,49 @@ public class YoutubeCommentsExtractor extends CommentsExtractor { @Override public InfoItemsPage getInitialPage() throws IOException, ExtractionException { - // initial page does not load any comments but is required to get comments token - super.fetchPage(); - return initPage; + String commentsTokenInside = findValue(responseBody, "commentSectionRenderer", "}"); + String commentsToken = findValue(commentsTokenInside, "continuation\":\"", "\""); + return getPage(getNextPage(commentsToken)); } - @Override - public String getNextPageUrl() throws IOException, ExtractionException { - // initial page does not load any comments but is required to get comments token - super.fetchPage(); - return 
initPage.getNextPageUrl(); - } - - private String getNextPageUrl(JsonObject ajaxJson) throws IOException, ParsingException { - + private Page getNextPage(JsonObject ajaxJson) throws ParsingException { JsonArray arr; try { arr = JsonUtils.getArray(ajaxJson, "response.continuationContents.commentSectionContinuation.continuations"); } catch (Exception e) { - return ""; + return null; } if (arr.isEmpty()) { - return ""; + return null; } String continuation; try { continuation = JsonUtils.getString(arr.getObject(0), "nextContinuationData.continuation"); } catch (Exception e) { - return ""; + return null; } - return getNextPageUrl(continuation); + return getNextPage(continuation); } - private String getNextPageUrl(String continuation) throws ParsingException { + private Page getNextPage(String continuation) throws ParsingException { Map params = new HashMap<>(); params.put("action_get_comments", "1"); params.put("pbj", "1"); params.put("ctoken", continuation); try { - return "https://m.youtube.com/watch_comment?" + getDataString(params); + return new Page("https://m.youtube.com/watch_comment?" 
+ getDataString(params)); } catch (UnsupportedEncodingException e) { throw new ParsingException("Could not get next page url", e); } } @Override - public InfoItemsPage getPage(String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } - String ajaxResponse = makeAjaxRequest(pageUrl); + + String ajaxResponse = makeAjaxRequest(page.getUrl()); JsonObject ajaxJson; try { ajaxJson = JsonParser.array().from(ajaxResponse).getObject(1); @@ -104,11 +98,10 @@ public class YoutubeCommentsExtractor extends CommentsExtractor { } CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId()); collectCommentsFrom(collector, ajaxJson); - return new InfoItemsPage<>(collector, getNextPageUrl(ajaxJson)); + return new InfoItemsPage<>(collector, getNextPage(ajaxJson)); } private void collectCommentsFrom(CommentsInfoItemsCollector collector, JsonObject ajaxJson) throws ParsingException { - JsonArray contents; try { contents = JsonUtils.getArray(ajaxJson, "response.continuationContents.commentSectionContinuation.items"); @@ -136,16 +129,13 @@ public class YoutubeCommentsExtractor extends CommentsExtractor { final Map> requestHeaders = new HashMap<>(); requestHeaders.put("User-Agent", singletonList(USER_AGENT)); final Response response = downloader.get(getUrl(), requestHeaders, getExtractorLocalization()); - String responseBody = response.responseBody(); + responseBody = response.responseBody(); ytClientVersion = findValue(responseBody, "INNERTUBE_CONTEXT_CLIENT_VERSION\":\"", "\""); ytClientName = Parser.matchGroup1(YT_CLIENT_NAME_PATTERN, responseBody); - String commentsTokenInside = findValue(responseBody, "commentSectionRenderer", 
"}"); - String commentsToken = findValue(commentsTokenInside, "continuation\":\"", "\""); - initPage = getPage(getNextPageUrl(commentsToken)); } - private String makeAjaxRequest(String siteUrl) throws IOException, ReCaptchaException { + private String makeAjaxRequest(String siteUrl) throws IOException, ReCaptchaException { Map> requestHeaders = new HashMap<>(); requestHeaders.put("Accept", singletonList("*/*")); requestHeaders.put("User-Agent", singletonList(USER_AGENT)); @@ -174,22 +164,4 @@ public class YoutubeCommentsExtractor extends CommentsExtractor { int endIndex = doc.indexOf(end, beginIndex); return doc.substring(beginIndex, endIndex); } - - public static String getYoutubeText(@Nonnull JsonObject object) throws ParsingException { - try { - return JsonUtils.getString(object, "simpleText"); - } catch (Exception e1) { - try { - JsonArray arr = JsonUtils.getArray(object, "runs"); - String result = ""; - for (int i = 0; i < arr.size(); i++) { - result = result + JsonUtils.getString(arr.getObject(i), "text"); - } - return result; - } catch (Exception e2) { - return ""; - } - } - } - } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsInfoItemExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsInfoItemExtractor.java index 9d659f5d3..913022440 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsInfoItemExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeCommentsInfoItemExtractor.java @@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors; import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; + import org.schabi.newpipe.extractor.comments.CommentsInfoItemExtractor; import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.localization.DateWrapper; 
@@ -11,7 +12,7 @@ import org.schabi.newpipe.extractor.utils.Utils; import javax.annotation.Nullable; -import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getTextFromObject; public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtractor { @@ -43,7 +44,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract @Override public String getName() throws ParsingException { try { - return YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "authorText")); + return getTextFromObject(JsonUtils.getObject(json, "authorText")); } catch (Exception e) { return ""; } @@ -52,7 +53,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract @Override public String getTextualUploadDate() throws ParsingException { try { - return YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "publishedTimeText")); + return getTextFromObject(JsonUtils.getObject(json, "publishedTimeText")); } catch (Exception e) { throw new ParsingException("Could not get publishedTimeText", e); } @@ -72,7 +73,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract @Override public int getLikeCount() throws ParsingException { try { - return JsonUtils.getNumber(json, "likeCount").intValue(); + return json.getInt("likeCount"); } catch (Exception e) { throw new ParsingException("Could not get like count", e); } @@ -81,7 +82,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract @Override public String getCommentText() throws ParsingException { try { - String commentText = YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "contentText")); + String commentText = getTextFromObject(JsonUtils.getObject(json, "contentText")); // youtube adds U+FEFF in some comments. eg. 
https://www.youtube.com/watch?v=Nj4F63E59io return Utils.removeUTF8BOM(commentText); } catch (Exception e) { @@ -111,7 +112,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract @Override public String getUploaderName() throws ParsingException { try { - return YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "authorText")); + return getTextFromObject(JsonUtils.getObject(json, "authorText")); } catch (Exception e) { return ""; } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeFeedExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeFeedExtractor.java index 230915f72..65a0ffd60 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeFeedExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeFeedExtractor.java @@ -5,6 +5,7 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; import org.schabi.newpipe.extractor.ListExtractor; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.downloader.Response; @@ -15,9 +16,10 @@ import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper; import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; -import javax.annotation.Nonnull; import java.io.IOException; +import javax.annotation.Nonnull; + public class YoutubeFeedExtractor extends FeedExtractor { public YoutubeFeedExtractor(StreamingService service, ListLinkHandler linkHandler) { super(service, linkHandler); @@ -66,17 +68,7 @@ public class YoutubeFeedExtractor extends FeedExtractor { } @Override - public String getNextPageUrl() { - return null; - } - - @Override - 
public InfoItemsPage getPage(String pageUrl) { - return null; - } - - @Override - public boolean hasNextPage() { - return false; + public InfoItemsPage getPage(final Page page) { + return InfoItemsPage.emptyPage(); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeMusicSearchExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeMusicSearchExtractor.java index 6d26e8d6a..249a2dae1 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeMusicSearchExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeMusicSearchExtractor.java @@ -7,6 +7,7 @@ import com.grack.nanojson.JsonParserException; import com.grack.nanojson.JsonWriter; import org.schabi.newpipe.extractor.InfoItem; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -169,32 +170,25 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor { final JsonArray contents = initialData.getObject("contents").getObject("sectionListRenderer").getArray("contents"); - for (Object content : contents) { - if (((JsonObject) content).has("musicShelfRenderer")) { - collectMusicStreamsFrom(collector, ((JsonObject) content).getObject("musicShelfRenderer").getArray("contents")); - } - } - - return new InfoItemsPage<>(collector, getNextPageUrl()); - } - - @Override - public String getNextPageUrl() throws ExtractionException, IOException { - final JsonArray contents = initialData.getObject("contents").getObject("sectionListRenderer").getArray("contents"); + Page nextPage = null; for (Object content : contents) { if (((JsonObject) content).has("musicShelfRenderer")) { - return getNextPageUrlFrom(((JsonObject) 
content).getObject("musicShelfRenderer").getArray("continuations")); + final JsonObject musicShelfRenderer = ((JsonObject) content).getObject("musicShelfRenderer"); + + collectMusicStreamsFrom(collector, musicShelfRenderer.getArray("contents")); + + nextPage = getNextPageFrom(musicShelfRenderer.getArray("continuations")); } } - return ""; + return new InfoItemsPage<>(collector, nextPage); } @Override - public InfoItemsPage getPage(final String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId()); @@ -236,7 +230,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor { headers.put("Referer", Collections.singletonList("music.youtube.com")); headers.put("Content-Type", Collections.singletonList("application/json")); - final String responseBody = getValidJsonResponseBody(getDownloader().post(pageUrl, headers, json)); + final String responseBody = getValidJsonResponseBody(getDownloader().post(page.getUrl(), headers, json)); final JsonObject ajaxJson; try { @@ -250,7 +244,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor { collectMusicStreamsFrom(collector, musicShelfContinuation.getArray("contents")); final JsonArray continuations = musicShelfContinuation.getArray("continuations"); - return new InfoItemsPage<>(collector, getNextPageUrlFrom(continuations)); + return new InfoItemsPage<>(collector, getNextPageFrom(continuations)); } private void collectMusicStreamsFrom(final InfoItemsSearchCollector collector, final JsonArray videos) { @@ -495,16 +489,17 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor { 
} } - private String getNextPageUrlFrom(final JsonArray continuations) throws ParsingException, IOException, ReCaptchaException { + private Page getNextPageFrom(final JsonArray continuations) throws ParsingException, IOException, ReCaptchaException { if (isNullOrEmpty(continuations)) { - return ""; + return null; } final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData"); final String continuation = nextContinuationData.getString("continuation"); final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams"); - return "https://music.youtube.com/youtubei/v1/search?ctoken=" + continuation + "&continuation=" + continuation - + "&itct=" + clickTrackingParams + "&alt=json&key=" + YoutubeParsingHelper.getYoutubeMusicKeys()[0]; + return new Page("https://music.youtube.com/youtubei/v1/search?ctoken=" + continuation + + "&continuation=" + continuation + "&itct=" + clickTrackingParams + "&alt=json" + + "&key=" + YoutubeParsingHelper.getYoutubeMusicKeys()[0]); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubePlaylistExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubePlaylistExtractor.java index 12d6d49e6..c55707233 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubePlaylistExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubePlaylistExtractor.java @@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors; import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -53,7 +54,7 @@ public class YoutubePlaylistExtractor extends 
PlaylistExtractor { } private JsonObject getUploaderInfo() throws ParsingException { - JsonArray items = initialData.getObject("sidebar").getObject("playlistSidebarRenderer").getArray("items"); + final JsonArray items = initialData.getObject("sidebar").getObject("playlistSidebarRenderer").getArray("items"); JsonObject videoOwner = items.getObject(1).getObject("playlistSidebarSecondaryInfoRenderer").getObject("videoOwner"); if (videoOwner.has("videoOwnerRenderer")) { @@ -77,19 +78,10 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor { } } - @Override - public String getNextPageUrl() { - return getNextPageUrlFrom(initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer") - .getArray("tabs").getObject(0).getObject("tabRenderer").getObject("content") - .getObject("sectionListRenderer").getArray("contents").getObject(0) - .getObject("itemSectionRenderer").getArray("contents").getObject(0) - .getObject("playlistVideoListRenderer").getArray("continuations")); - } - @Nonnull @Override public String getName() throws ParsingException { - String name = getTextFromObject(playlistInfo.getObject("title")); + final String name = getTextFromObject(playlistInfo.getObject("title")); if (name != null && !name.isEmpty()) return name; return initialData.getObject("microformat").getObject("microformatDataRenderer").getString("title"); @@ -138,7 +130,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor { @Override public String getUploaderAvatarUrl() throws ParsingException { try { - String url = getUploaderInfo().getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url"); + final String url = getUploaderInfo().getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url"); return fixThumbnailUrl(url); } catch (Exception e) { @@ -149,7 +141,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor { @Override public long getStreamCount() throws ParsingException { try { - String viewsText = 
getTextFromObject(getPlaylistInfo().getArray("stats").getObject(0)); + final String viewsText = getTextFromObject(getPlaylistInfo().getArray("stats").getObject(0)); return Long.parseLong(Utils.removeNonDigitCharacters(viewsText)); } catch (Exception e) { throw new ParsingException("Could not get video count from playlist", e); @@ -178,6 +170,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor { @Override public InfoItemsPage getInitialPage() { final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); + Page nextPage = null; final JsonArray contents = initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer") .getArray("tabs").getObject(0).getObject("tabRenderer").getObject("content") @@ -193,48 +186,51 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor { .getObject("videoList").getObject("playlistVideoListRenderer").getArray("contents")); } } + + return new InfoItemsPage<>(collector, null); } else if (contents.getObject(0).has("playlistVideoListRenderer")) { - final JsonArray videos = contents.getObject(0) - .getObject("playlistVideoListRenderer").getArray("contents"); - collectStreamsFrom(collector, videos); + final JsonObject videos = contents.getObject(0).getObject("playlistVideoListRenderer"); + collectStreamsFrom(collector, videos.getArray("contents")); + + nextPage = getNextPageFrom(videos.getArray("continuations")); } - return new InfoItemsPage<>(collector, getNextPageUrl()); + return new InfoItemsPage<>(collector, nextPage); } @Override - public InfoItemsPage getPage(final String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } final 
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization()); + final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization()); final JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response") .getObject("continuationContents").getObject("playlistVideoListContinuation"); collectStreamsFrom(collector, sectionListContinuation.getArray("contents")); - return new InfoItemsPage<>(collector, getNextPageUrlFrom(sectionListContinuation.getArray("continuations"))); + return new InfoItemsPage<>(collector, getNextPageFrom(sectionListContinuation.getArray("continuations"))); } - private String getNextPageUrlFrom(final JsonArray continuations) { + private Page getNextPageFrom(final JsonArray continuations) { if (isNullOrEmpty(continuations)) { - return ""; + return null; } - JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData"); - String continuation = nextContinuationData.getString("continuation"); - String clickTrackingParams = nextContinuationData.getString("clickTrackingParams"); - return "https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation - + "&itct=" + clickTrackingParams; + final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData"); + final String continuation = nextContinuationData.getString("continuation"); + final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams"); + return new Page("https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation + + "&itct=" + clickTrackingParams); } private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonArray videos) { final TimeAgoParser timeAgoParser = getTimeAgoParser(); - for (Object video : videos) { + for (final Object video : videos) { if (((JsonObject) 
video).has("playlistVideoRenderer")) { collector.commit(new YoutubeStreamInfoItemExtractor(((JsonObject) video).getObject("playlistVideoRenderer"), timeAgoParser) { @Override diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeSearchExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeSearchExtractor.java index 560943628..6e7d41c48 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeSearchExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeSearchExtractor.java @@ -2,7 +2,12 @@ package org.schabi.newpipe.extractor.services.youtube.extractors; import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; +import com.grack.nanojson.JsonParser; +import com.grack.nanojson.JsonParserException; +import com.grack.nanojson.JsonWriter; + import org.schabi.newpipe.extractor.InfoItem; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -13,11 +18,19 @@ import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector; import org.schabi.newpipe.extractor.search.SearchExtractor; import org.schabi.newpipe.extractor.utils.JsonUtils; -import javax.annotation.Nonnull; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.annotation.Nonnull; + +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getClientVersion; import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getJsonResponse; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getKey; import static 
org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getTextFromObject; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getValidJsonResponseBody; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; /* @@ -95,42 +108,88 @@ public class YoutubeSearchExtractor extends SearchExtractor { @Nonnull @Override - public InfoItemsPage getInitialPage() throws ExtractionException { + public InfoItemsPage getInitialPage() throws IOException, ExtractionException { final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId()); final JsonArray sections = initialData.getObject("contents").getObject("twoColumnSearchResultsRenderer") .getObject("primaryContents").getObject("sectionListRenderer").getArray("contents"); - for (Object section : sections) { - collectStreamsFrom(collector, ((JsonObject) section).getObject("itemSectionRenderer").getArray("contents")); + Page nextPage = null; + + for (final Object section : sections) { + if (((JsonObject) section).has("itemSectionRenderer")) { + final JsonObject itemSectionRenderer = ((JsonObject) section).getObject("itemSectionRenderer"); + + collectStreamsFrom(collector, itemSectionRenderer.getArray("contents")); + + nextPage = getNextPageFrom(itemSectionRenderer.getArray("continuations")); + } else if (((JsonObject) section).has("continuationItemRenderer")) { + nextPage = getNewNextPageFrom(((JsonObject) section).getObject("continuationItemRenderer")); + } } - return new InfoItemsPage<>(collector, getNextPageUrl()); + return new InfoItemsPage<>(collector, nextPage); } @Override - public String getNextPageUrl() throws ExtractionException { - return getNextPageUrlFrom(initialData.getObject("contents").getObject("twoColumnSearchResultsRenderer") - .getObject("primaryContents").getObject("sectionListRenderer").getArray("contents") - .getObject(0).getObject("itemSectionRenderer").getArray("continuations")); - } - - @Override - public InfoItemsPage 
getPage(final String pageUrl) throws IOException, ExtractionException { - if (isNullOrEmpty(pageUrl)) { - throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); + public InfoItemsPage getPage(final Page page) throws IOException, ExtractionException { + if (page == null || isNullOrEmpty(page.getUrl())) { + throw new IllegalArgumentException("Page doesn't contain an URL"); } final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId()); - final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization()); - final JsonObject itemSectionRenderer = ajaxJson.getObject(1).getObject("response") - .getObject("continuationContents").getObject("itemSectionContinuation"); + if (page.getId() == null) { + final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization()); - collectStreamsFrom(collector, itemSectionRenderer.getArray("contents")); - final JsonArray continuations = itemSectionRenderer.getArray("continuations"); + final JsonObject itemSectionContinuation = ajaxJson.getObject(1).getObject("response") + .getObject("continuationContents").getObject("itemSectionContinuation"); - return new InfoItemsPage<>(collector, getNextPageUrlFrom(continuations)); + collectStreamsFrom(collector, itemSectionContinuation.getArray("contents")); + final JsonArray continuations = itemSectionContinuation.getArray("continuations"); + + return new InfoItemsPage<>(collector, getNextPageFrom(continuations)); + } else { + // @formatter:off + final byte[] json = JsonWriter.string() + .object() + .object("context") + .object("client") + .value("hl", "en") + .value("gl", getExtractorContentCountry().getCountryCode()) + .value("clientName", "WEB") + .value("clientVersion", getClientVersion()) + .value("utcOffsetMinutes", 0) + .end() + .object("request").end() + .object("user").end() + .end() + .value("continuation", page.getId()) + .end().done().getBytes("UTF-8"); + // @formatter:on + + final Map> headers 
= new HashMap<>(); + headers.put("Origin", Collections.singletonList("https://www.youtube.com")); + headers.put("Referer", Collections.singletonList(this.getUrl())); + headers.put("Content-Type", Collections.singletonList("application/json")); + + final String responseBody = getValidJsonResponseBody(getDownloader().post(page.getUrl(), headers, json)); + + final JsonObject ajaxJson; + try { + ajaxJson = JsonParser.object().from(responseBody); + } catch (JsonParserException e) { + throw new ParsingException("Could not parse JSON", e); + } + + final JsonArray continuationItems = ajaxJson.getArray("onResponseReceivedCommands") + .getObject(0).getObject("appendContinuationItemsAction").getArray("continuationItems"); + + final JsonArray contents = continuationItems.getObject(0).getObject("itemSectionRenderer").getArray("contents"); + collectStreamsFrom(collector, contents); + + return new InfoItemsPage<>(collector, getNewNextPageFrom(continuationItems.getObject(1).getObject("continuationItemRenderer"))); + } } private void collectStreamsFrom(final InfoItemsSearchCollector collector, final JsonArray videos) throws NothingFoundException, ParsingException { @@ -150,16 +209,29 @@ public class YoutubeSearchExtractor extends SearchExtractor { } } - private String getNextPageUrlFrom(final JsonArray continuations) throws ParsingException { + private Page getNextPageFrom(final JsonArray continuations) throws ParsingException { if (isNullOrEmpty(continuations)) { - return ""; + return null; } final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData"); final String continuation = nextContinuationData.getString("continuation"); final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams"); - return getUrl() + "&pbj=1&ctoken=" + continuation + "&continuation=" + continuation - + "&itct=" + clickTrackingParams; + return new Page(getUrl() + "&pbj=1&ctoken=" + continuation + "&continuation=" + continuation + + "&itct=" 
+ clickTrackingParams); + } + + private Page getNewNextPageFrom(final JsonObject continuationItemRenderer) throws IOException, ExtractionException { + if (isNullOrEmpty(continuationItemRenderer)) { + return null; + } + + final String token = continuationItemRenderer.getObject("continuationEndpoint") + .getObject("continuationCommand").getString("token"); + + final String url = "https://www.youtube.com/youtubei/v1/search?key=" + getKey(); + + return new Page(url, token); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeStreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeStreamExtractor.java index bdc2a10f3..93bd2121e 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeStreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeStreamExtractor.java @@ -21,14 +21,14 @@ import org.schabi.newpipe.extractor.localization.Localization; import org.schabi.newpipe.extractor.localization.TimeAgoParser; import org.schabi.newpipe.extractor.localization.TimeAgoPatternsManager; import org.schabi.newpipe.extractor.services.youtube.ItagItem; -import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory; import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper; +import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory; import org.schabi.newpipe.extractor.stream.AudioStream; import org.schabi.newpipe.extractor.stream.Description; import org.schabi.newpipe.extractor.stream.Frameset; import org.schabi.newpipe.extractor.stream.Stream; import org.schabi.newpipe.extractor.stream.StreamExtractor; -import org.schabi.newpipe.extractor.stream.StreamInfoItem; +import org.schabi.newpipe.extractor.stream.StreamInfoItemExtractor; import 
org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector; import org.schabi.newpipe.extractor.stream.StreamType; import org.schabi.newpipe.extractor.stream.SubtitlesStream; @@ -52,7 +52,10 @@ import java.util.Map; import javax.annotation.Nonnull; import javax.annotation.Nullable; -import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.*; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.fixThumbnailUrl; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getJsonResponse; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getTextFromObject; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getUrlFromNavigationEndpoint; import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING; import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; @@ -115,7 +118,13 @@ public class YoutubeStreamExtractor extends StreamExtractor { @Override public String getName() throws ParsingException { assertPageFetched(); - String title = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("title")); + String title = null; + + try { + title = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("title")); + } catch (ParsingException ignored) { + // age-restricted videos cause a ParsingException here + } if (isNullOrEmpty(title)) { title = playerResponse.getObject("videoDetails").getString("title"); @@ -193,11 +202,15 @@ public class YoutubeStreamExtractor extends StreamExtractor { @Nonnull @Override - public Description getDescription() throws ParsingException { + public Description getDescription() { assertPageFetched(); // description with more info on links - String description = getTextFromObject(getVideoSecondaryInfoRenderer().getObject("description"), true); - if (description != null && !description.isEmpty()) return new Description(description, Description.HTML); + try { + String description = 
getTextFromObject(getVideoSecondaryInfoRenderer().getObject("description"), true); + if (description != null && !description.isEmpty()) return new Description(description, Description.HTML); + } catch (ParsingException ignored) { + // age-restricted videos cause a ParsingException here + } // raw non-html description return new Description(playerResponse.getObject("videoDetails").getString("shortDescription"), Description.PLAIN_TEXT); @@ -240,14 +253,20 @@ public class YoutubeStreamExtractor extends StreamExtractor { */ @Override public long getTimeStamp() throws ParsingException { - return getTimestampSeconds("((#|&|\\?)t=\\d{0,3}h?\\d{0,3}m?\\d{1,3}s?)"); + return getTimestampSeconds("((#|&|\\?)(t|start)=\\d{0,3}h?\\d{0,3}m?\\d{1,3}s?)"); } @Override public long getViewCount() throws ParsingException { assertPageFetched(); - String views = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("viewCount") + String views = null; + + try { + views = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("viewCount") .getObject("videoViewCountRenderer").getObject("viewCount")); + } catch (ParsingException ignored) { + // age-restricted videos cause a ParsingException here + } if (isNullOrEmpty(views)) { views = playerResponse.getObject("videoDetails").getString("viewCount"); @@ -279,6 +298,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { } catch (NumberFormatException nfe) { throw new ParsingException("Could not parse \"" + likesString + "\" as an Integer", nfe); } catch (Exception e) { + if (ageLimit == 18) return -1; throw new ParsingException("Could not get like count", e); } } @@ -302,6 +322,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { } catch (NumberFormatException nfe) { throw new ParsingException("Could not parse \"" + dislikesString + "\" as an Integer", nfe); } catch (Exception e) { + if (ageLimit == 18) return -1; throw new ParsingException("Could not get dislike count", e); } } @@ -311,14 +332,20 @@ public 
class YoutubeStreamExtractor extends StreamExtractor { public String getUploaderUrl() throws ParsingException { assertPageFetched(); + try { String uploaderUrl = getUrlFromNavigationEndpoint(getVideoSecondaryInfoRenderer() .getObject("owner").getObject("videoOwnerRenderer").getObject("navigationEndpoint")); - if (uploaderUrl != null && !uploaderUrl.isEmpty()) return uploaderUrl; - + if (!isNullOrEmpty(uploaderUrl)) { + return uploaderUrl; + } + } catch (ParsingException ignored) { + // age-restricted videos cause a ParsingException here + } String uploaderId = playerResponse.getObject("videoDetails").getString("channelId"); - if (uploaderId != null && !uploaderId.isEmpty()) + if (!isNullOrEmpty(uploaderId)) { return YoutubeChannelLinkHandlerFactory.getInstance().getUrl("channel/" + uploaderId); + } throw new ParsingException("Could not get uploader url"); } @@ -327,8 +354,13 @@ public class YoutubeStreamExtractor extends StreamExtractor { @Override public String getUploaderName() throws ParsingException { assertPageFetched(); - String uploaderName = getTextFromObject(getVideoSecondaryInfoRenderer().getObject("owner") + + String uploaderName = null; + + try { + uploaderName = getTextFromObject(getVideoSecondaryInfoRenderer().getObject("owner") .getObject("videoOwnerRenderer").getObject("title")); + } catch (ParsingException ignored) { } if (isNullOrEmpty(uploaderName)) { uploaderName = playerResponse.getObject("videoDetails").getString("author"); @@ -343,14 +375,22 @@ public class YoutubeStreamExtractor extends StreamExtractor { @Override public String getUploaderAvatarUrl() throws ParsingException { assertPageFetched(); - try { - String url = getVideoSecondaryInfoRenderer().getObject("owner").getObject("videoOwnerRenderer") - .getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url"); - return fixThumbnailUrl(url); - } catch (Exception e) { - throw new ParsingException("Could not get uploader avatar url", e); + String url = null; + + try { + url = 
getVideoSecondaryInfoRenderer().getObject("owner").getObject("videoOwnerRenderer") + .getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url"); + } catch (ParsingException ignored) { + // age-restricted videos cause a ParsingException here } + + if (isNullOrEmpty(url)) { + if (ageLimit == 18) return ""; + throw new ParsingException("Could not get uploader avatar URL"); + } + + return fixThumbnailUrl(url); } @Nonnull @@ -508,12 +548,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { return StreamType.VIDEO_STREAM; } - @Override - public StreamInfoItem getNextStream() throws ExtractionException { - assertPageFetched(); - - if (getAgeLimit() != NO_AGE_LIMIT) return null; - + private StreamInfoItemExtractor getNextStream() throws ExtractionException { try { final JsonObject firstWatchNextItem = initialData.getObject("contents") .getObject("twoColumnWatchNextResults").getObject("secondaryResults") @@ -527,11 +562,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { final JsonObject videoInfo = firstWatchNextItem.getObject("compactAutoplayRenderer") .getArray("contents").getObject(0).getObject("compactVideoRenderer"); - final TimeAgoParser timeAgoParser = getTimeAgoParser(); - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - - collector.commit(new YoutubeStreamInfoItemExtractor(videoInfo, timeAgoParser)); - return collector.getItems().get(0); + return new YoutubeStreamInfoItemExtractor(videoInfo, getTimeAgoParser()); } catch (Exception e) { throw new ParsingException("Could not get next video", e); } @@ -544,13 +575,19 @@ public class YoutubeStreamExtractor extends StreamExtractor { if (getAgeLimit() != NO_AGE_LIMIT) return null; try { - StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); - JsonArray results = initialData.getObject("contents").getObject("twoColumnWatchNextResults") + final StreamInfoItemsCollector collector = new 
StreamInfoItemsCollector(getServiceId()); + + final StreamInfoItemExtractor nextStream = getNextStream(); + if (nextStream != null) { + collector.commit(nextStream); + } + + final JsonArray results = initialData.getObject("contents").getObject("twoColumnWatchNextResults") .getObject("secondaryResults").getObject("secondaryResults").getArray("results"); final TimeAgoParser timeAgoParser = getTimeAgoParser(); - for (Object ul : results) { + for (final Object ul : results) { if (((JsonObject) ul).has("compactVideoRenderer")) { collector.commit(new YoutubeStreamInfoItemExtractor(((JsonObject) ul).getObject("compactVideoRenderer"), timeAgoParser)); } @@ -583,14 +620,14 @@ public class YoutubeStreamExtractor extends StreamExtractor { private static final String HTTPS = "https:"; private static final String DECRYPTION_FUNC_NAME = "decrypt"; - private final static String DECRYPTION_SIGNATURE_FUNCTION_REGEX = - "([\\w$]+)\\s*=\\s*function\\((\\w+)\\)\\{\\s*\\2=\\s*\\2\\.split\\(\"\"\\)\\s*;"; - private final static String DECRYPTION_SIGNATURE_FUNCTION_REGEX_2 = - "\\b([\\w$]{2})\\s*=\\s*function\\((\\w+)\\)\\{\\s*\\2=\\s*\\2\\.split\\(\"\"\\)\\s*;"; - private final static String DECRYPTION_AKAMAIZED_STRING_REGEX = - "yt\\.akamaized\\.net/\\)\\s*\\|\\|\\s*.*?\\s*c\\s*&&\\s*d\\.set\\([^,]+\\s*,\\s*(:encodeURIComponent\\s*\\()([a-zA-Z0-9$]+)\\("; - private final static String DECRYPTION_AKAMAIZED_SHORT_STRING_REGEX = - "\\bc\\s*&&\\s*d\\.set\\([^,]+\\s*,\\s*(:encodeURIComponent\\s*\\()([a-zA-Z0-9$]+)\\("; + private final static String[] REGEXES = { + "(?:\\b|[^a-zA-Z0-9$])([a-zA-Z0-9$]{2})\\s*=\\s*function\\(\\s*a\\s*\\)\\s*\\{\\s*a\\s*=\\s*a\\.split\\(\\s*\"\"\\s*\\)", + "([\\w$]+)\\s*=\\s*function\\((\\w+)\\)\\{\\s*\\2=\\s*\\2\\.split\\(\"\"\\)\\s*;", + "\\b([\\w$]{2})\\s*=\\s*function\\((\\w+)\\)\\{\\s*\\2=\\s*\\2\\.split\\(\"\"\\)\\s*;", + "yt\\.akamaized\\.net/\\)\\s*\\|\\|\\s*.*?\\s*c\\s*&&\\s*d\\.set\\([^,]+\\s*,\\s*(:encodeURIComponent\\s*\\()([a-zA-Z0-9$]+)\\(", + 
"\\bc\\s*&&\\s*d\\.set\\([^,]+\\s*,\\s*(:encodeURIComponent\\s*\\()([a-zA-Z0-9$]+)\\(" + }; + ; private volatile String decryptionCode = ""; @@ -767,20 +804,15 @@ public class YoutubeStreamExtractor extends StreamExtractor { return result == null ? "" : result.toString(); } - private String getDecryptionFuncName(String playerCode) throws DecryptException { - String[] decryptionFuncNameRegexes = { - DECRYPTION_SIGNATURE_FUNCTION_REGEX_2, - DECRYPTION_SIGNATURE_FUNCTION_REGEX, - DECRYPTION_AKAMAIZED_SHORT_STRING_REGEX, - DECRYPTION_AKAMAIZED_STRING_REGEX - }; + private String getDecryptionFuncName(final String playerCode) throws DecryptException { Parser.RegexException exception = null; - for (String regex : decryptionFuncNameRegexes) { + for (final String regex : REGEXES) { try { return Parser.matchGroup1(regex, playerCode); } catch (Parser.RegexException re) { - if (exception == null) + if (exception == null) { exception = re; + } } } throw new DecryptException("Could not find decrypt function with any of the given patterns.", exception); @@ -872,7 +904,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { } } - if (videoPrimaryInfoRenderer == null) { + if (isNullOrEmpty(videoPrimaryInfoRenderer)) { throw new ParsingException("Could not find videoPrimaryInfoRenderer"); } @@ -894,7 +926,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { } } - if (videoSecondaryInfoRenderer == null) { + if (isNullOrEmpty(videoSecondaryInfoRenderer)) { throw new ParsingException("Could not find videoSecondaryInfoRenderer"); } @@ -904,6 +936,7 @@ public class YoutubeStreamExtractor extends StreamExtractor { @Nonnull private static String getVideoInfoUrl(final String id, final String sts) { + // TODO: Try parsing embedded_player_response first return "https://www.youtube.com/get_video_info?" 
+ "video_id=" + id + "&eurl=https://youtube.googleapis.com/v/" + id + "&sts=" + sts + "&ps=default&gl=US&hl=en"; diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeTrendingExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeTrendingExtractor.java index ddd108e87..ac686e723 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeTrendingExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/extractors/YoutubeTrendingExtractor.java @@ -23,6 +23,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors; import com.grack.nanojson.JsonArray; import com.grack.nanojson.JsonObject; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -61,13 +62,8 @@ public class YoutubeTrendingExtractor extends KioskExtractor { } @Override - public String getNextPageUrl() { - return ""; - } - - @Override - public InfoItemsPage getPage(String pageUrl) { - return null; + public InfoItemsPage getPage(final Page page) { + return InfoItemsPage.emptyPage(); } @Nonnull @@ -98,6 +94,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor { collector.commit(new YoutubeStreamInfoItemExtractor(videoInfo, timeAgoParser)); } } - return new InfoItemsPage<>(collector, getNextPageUrl()); + + return new InfoItemsPage<>(collector, null); } } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/linkHandler/YoutubeStreamLinkHandlerFactory.java b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/linkHandler/YoutubeStreamLinkHandlerFactory.java index 7f3e6824e..596de2dac 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/linkHandler/YoutubeStreamLinkHandlerFactory.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/services/youtube/linkHandler/YoutubeStreamLinkHandlerFactory.java @@ -1,7 +1,5 @@ package org.schabi.newpipe.extractor.services.youtube.linkHandler; -import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.BASE_YOUTUBE_INTENT_URL; - import org.schabi.newpipe.extractor.exceptions.FoundAdException; import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.linkhandler.LinkHandler; @@ -9,12 +7,13 @@ import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory; import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper; import org.schabi.newpipe.extractor.utils.Utils; +import javax.annotation.Nullable; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; -import javax.annotation.Nullable; +import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.BASE_YOUTUBE_INTENT_URL; /* * Created by Christian Schabesberger on 02.02.16. 
@@ -61,7 +60,7 @@ public class YoutubeStreamLinkHandlerFactory extends LinkHandlerFactory { @Override public LinkHandler fromUrl(String url) throws ParsingException { - if (url.startsWith(BASE_YOUTUBE_INTENT_URL)){ + if (url.startsWith(BASE_YOUTUBE_INTENT_URL)) { return super.fromUrl(url, BASE_YOUTUBE_INTENT_URL); } else { return super.fromUrl(url); @@ -191,17 +190,19 @@ public class YoutubeStreamLinkHandlerFactory extends LinkHandlerFactory { case "DEV.INVIDIO.US": case "INVIDIO.US": case "INVIDIOUS.SNOPYTA.ORG": - case "DE.INVIDIOUS.SNOPYTA.ORG": case "FI.INVIDIOUS.SNOPYTA.ORG": - case "VID.WXZM.SX": - case "INVIDIOUS.KABI.TK": - case "INVIDIOU.SH": - case "WWW.INVIDIOU.SH": - case "NO.INVIDIOU.SH": - case "INVIDIOUS.ENKIRTON.NET": - case "TUBE.POAL.CO": + case "YEWTU.BE": + case "INVIDIOUS.GGC-PROJECT.DE": + case "YT.MAISPUTAIN.OVH": case "INVIDIOUS.13AD.DE": - case "YT.ELUKERIO.ORG": { // code-block for hooktube.com and Invidious instances + case "INVIDIOUS.TOOT.KOELN": + case "INVIDIOUS.FDN.FR": + case "WATCH.NETTOHIKARI.COM": + case "INVIDIOUS.SNWMDS.NET": + case "INVIDIOUS.SNWMDS.ORG": + case "INVIDIOUS.SNWMDS.COM": + case "INVIDIOUS.SUNSETRAVENS.COM": + case "INVIDIOUS.GACHIRANGERS.COM": { // code-block for hooktube.com and Invidious instances if (path.equals("watch")) { String viewQueryValue = Utils.getQueryValue(url, "v"); if (viewQueryValue != null) { diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamExtractor.java b/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamExtractor.java index 2cc64ab70..f48bb913f 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamExtractor.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamExtractor.java @@ -30,13 +30,14 @@ import org.schabi.newpipe.extractor.linkhandler.LinkHandler; import org.schabi.newpipe.extractor.localization.DateWrapper; import org.schabi.newpipe.extractor.utils.Parser; -import javax.annotation.Nonnull; 
-import javax.annotation.Nullable; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Locale; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + /** * Scrapes information from a video/audio streaming service (eg, YouTube). */ @@ -309,23 +310,12 @@ public abstract class StreamExtractor extends Extractor { */ public abstract StreamType getStreamType() throws ParsingException; - /** - * Should return the url of the next stream. NewPipe will automatically play - * the next stream if the user wants that. - * If the next stream is is not available simply return null - * - * @return the InfoItem of the next stream - * @throws IOException - * @throws ExtractionException - */ - public abstract StreamInfoItem getNextStream() throws IOException, ExtractionException; - /** * Should return a list of streams related to the current handled. Many services show suggested * streams. If you don't like suggested streams you should implement them anyway since they can - * be disabled by the user later in the frontend. - * This list MUST NOT contain the next available video as this should be return through getNextStream() - * If it is not available simply return null + * be disabled by the user later in the frontend. The first related stream might be what was + * previously known as a next stream. + * If related streams aren't available simply return {@code null}. 
* * @return a list of InfoItems showing the related videos/streams * @throws IOException @@ -337,11 +327,10 @@ public abstract class StreamExtractor extends Extractor { * Should return a list of Frameset object that contains preview of stream frames * * @return list of preview frames or empty list if frames preview is not supported or not found for specified stream - * @throws IOException * @throws ExtractionException */ @Nonnull - public List getFrames() throws IOException, ExtractionException { + public List getFrames() throws ExtractionException { return Collections.emptyList(); } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamInfo.java b/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamInfo.java index 3878e593a..805f26122 100644 --- a/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamInfo.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/stream/StreamInfo.java @@ -282,11 +282,6 @@ public class StreamInfo extends Info { } catch (Exception e) { streamInfo.addError(e); } - try { - streamInfo.setNextVideo(extractor.getNextStream()); - } catch (Exception e) { - streamInfo.addError(e); - } try { streamInfo.setSubtitles(extractor.getSubtitlesDefault()); } catch (Exception e) { @@ -366,7 +361,6 @@ public class StreamInfo extends Info { private String hlsUrl = ""; - private StreamInfoItem nextVideo; private List relatedStreams = new ArrayList<>(); private long startPosition = 0; @@ -597,14 +591,6 @@ public class StreamInfo extends Info { this.hlsUrl = hlsUrl; } - public StreamInfoItem getNextVideo() { - return nextVideo; - } - - public void setNextVideo(StreamInfoItem nextVideo) { - this.nextVideo = nextVideo; - } - public List getRelatedStreams() { return relatedStreams; } diff --git a/extractor/src/main/java/org/schabi/newpipe/extractor/utils/Utils.java b/extractor/src/main/java/org/schabi/newpipe/extractor/utils/Utils.java index 5b70ce59c..288e401c3 100644 --- 
a/extractor/src/main/java/org/schabi/newpipe/extractor/utils/Utils.java +++ b/extractor/src/main/java/org/schabi/newpipe/extractor/utils/Utils.java @@ -7,6 +7,7 @@ import java.net.MalformedURLException; import java.net.URL; import java.net.URLDecoder; import java.util.Collection; +import java.util.Iterator; import java.util.List; import java.util.Map; @@ -222,4 +223,16 @@ public class Utils { return true; } + + public static String join(final CharSequence delimiter, final Iterable elements) { + final StringBuilder stringBuilder = new StringBuilder(); + final Iterator iterator = elements.iterator(); + while (iterator.hasNext()) { + stringBuilder.append(iterator.next()); + if (iterator.hasNext()) { + stringBuilder.append(delimiter); + } + } + return stringBuilder.toString(); + } } diff --git a/extractor/src/test/java/org/schabi/newpipe/DownloaderTestImpl.java b/extractor/src/test/java/org/schabi/newpipe/DownloaderTestImpl.java index c93f31b76..a28754f3c 100644 --- a/extractor/src/test/java/org/schabi/newpipe/DownloaderTestImpl.java +++ b/extractor/src/test/java/org/schabi/newpipe/DownloaderTestImpl.java @@ -4,123 +4,97 @@ import org.schabi.newpipe.extractor.downloader.Downloader; import org.schabi.newpipe.extractor.downloader.Request; import org.schabi.newpipe.extractor.downloader.Response; import org.schabi.newpipe.extractor.exceptions.ReCaptchaException; -import org.schabi.newpipe.extractor.localization.Localization; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; import javax.annotation.Nonnull; import javax.annotation.Nullable; -import javax.net.ssl.HttpsURLConnection; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.URL; -import java.net.URLConnection; -import java.util.List; -import java.util.Map; -public class DownloaderTestImpl extends Downloader { +import okhttp3.OkHttpClient; +import 
okhttp3.RequestBody; +import okhttp3.ResponseBody; - private static final String USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101 Firefox/68.0"; - private static final String DEFAULT_HTTP_ACCEPT_LANGUAGE = "en"; +public final class DownloaderTestImpl extends Downloader { + private static final String USER_AGENT + = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101 Firefox/68.0"; + private static DownloaderTestImpl instance; + private OkHttpClient client; - private static DownloaderTestImpl instance = null; + private DownloaderTestImpl(final OkHttpClient.Builder builder) { + this.client = builder.readTimeout(30, TimeUnit.SECONDS).build(); + } - private DownloaderTestImpl() { + /** + * It's recommended to call exactly once in the entire lifetime of the application. + * + * @param builder if null, default builder will be used + * @return a new instance of {@link DownloaderTestImpl} + */ + public static DownloaderTestImpl init(@Nullable final OkHttpClient.Builder builder) { + instance = new DownloaderTestImpl( + builder != null ? 
builder : new OkHttpClient.Builder()); + return instance; } public static DownloaderTestImpl getInstance() { if (instance == null) { - synchronized (DownloaderTestImpl.class) { - if (instance == null) { - instance = new DownloaderTestImpl(); - } - } + init(null); } return instance; } - private void setDefaultHeaders(URLConnection connection) { - connection.setRequestProperty("User-Agent", USER_AGENT); - connection.setRequestProperty("Accept-Language", DEFAULT_HTTP_ACCEPT_LANGUAGE); - } - @Override - public Response execute(@Nonnull Request request) throws IOException, ReCaptchaException { + public Response execute(@Nonnull final Request request) + throws IOException, ReCaptchaException { final String httpMethod = request.httpMethod(); final String url = request.url(); final Map> headers = request.headers(); - @Nullable final byte[] dataToSend = request.dataToSend(); - @Nullable final Localization localization = request.localization(); + final byte[] dataToSend = request.dataToSend(); - final HttpsURLConnection connection = (HttpsURLConnection) new URL(url).openConnection(); + RequestBody requestBody = null; + if (dataToSend != null) { + requestBody = RequestBody.create(null, dataToSend); + } - connection.setConnectTimeout(30 * 1000); // 30s - connection.setReadTimeout(30 * 1000); // 30s - connection.setRequestMethod(httpMethod); - - setDefaultHeaders(connection); + final okhttp3.Request.Builder requestBuilder = new okhttp3.Request.Builder() + .method(httpMethod, requestBody).url(url) + .addHeader("User-Agent", USER_AGENT); for (Map.Entry> pair : headers.entrySet()) { final String headerName = pair.getKey(); final List headerValueList = pair.getValue(); if (headerValueList.size() > 1) { - connection.setRequestProperty(headerName, null); + requestBuilder.removeHeader(headerName); for (String headerValue : headerValueList) { - connection.addRequestProperty(headerName, headerValue); + requestBuilder.addHeader(headerName, headerValue); } } else if 
(headerValueList.size() == 1) { - connection.setRequestProperty(headerName, headerValueList.get(0)); + requestBuilder.header(headerName, headerValueList.get(0)); } + } - @Nullable OutputStream outputStream = null; - @Nullable InputStreamReader input = null; - try { - if (dataToSend != null && dataToSend.length > 0) { - connection.setDoOutput(true); - connection.setRequestProperty("Content-Length", dataToSend.length + ""); - outputStream = connection.getOutputStream(); - outputStream.write(dataToSend); - } + final okhttp3.Response response = client.newCall(requestBuilder.build()).execute(); - final InputStream inputStream = connection.getInputStream(); - final StringBuilder response = new StringBuilder(); + if (response.code() == 429) { + response.close(); - // Not passing any charset for decoding here... something to keep in mind. - input = new InputStreamReader(inputStream); - - int readCount; - char[] buffer = new char[32 * 1024]; - while ((readCount = input.read(buffer)) != -1) { - response.append(buffer, 0, readCount); - } - - final int responseCode = connection.getResponseCode(); - final String responseMessage = connection.getResponseMessage(); - final Map> responseHeaders = connection.getHeaderFields(); - final String latestUrl = connection.getURL().toString(); - - return new Response(responseCode, responseMessage, responseHeaders, response.toString(), latestUrl); - } catch (Exception e) { - final int responseCode = connection.getResponseCode(); - - /* - * HTTP 429 == Too Many Request - * Receive from Youtube.com = ReCaptcha challenge request - * See : https://github.com/rg3/youtube-dl/issues/5138 - */ - if (responseCode == 429) { - throw new ReCaptchaException("reCaptcha Challenge requested", url); - } else if (responseCode != -1) { - final String latestUrl = connection.getURL().toString(); - return new Response(responseCode, connection.getResponseMessage(), connection.getHeaderFields(), null, latestUrl); - } - - throw new IOException("Error occurred while 
fetching the content", e); - } finally { - if (outputStream != null) outputStream.close(); - if (input != null) input.close(); + throw new ReCaptchaException("reCaptcha Challenge requested", url); } + + final ResponseBody body = response.body(); + String responseBodyToReturn = null; + + if (body != null) { + responseBodyToReturn = body.string(); + } + + final String latestUrl = response.request().url().toString(); + return new Response(response.code(), response.message(), response.headers().toMultimap(), + responseBodyToReturn, latestUrl); } } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/DefaultTests.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/DefaultTests.java index 82f75298a..a335b0eca 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/DefaultTests.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/DefaultTests.java @@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services; import org.schabi.newpipe.extractor.InfoItem; import org.schabi.newpipe.extractor.ListExtractor; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.channel.ChannelInfoItem; import org.schabi.newpipe.extractor.exceptions.ParsingException; @@ -84,9 +85,8 @@ public final class DefaultTests { } public static void assertNoMoreItems(ListExtractor extractor) throws Exception { - assertFalse("More items available when it shouldn't", extractor.hasNextPage()); - final String nextPageUrl = extractor.getNextPageUrl(); - assertTrue("Next page is not empty or null", isNullOrEmpty(nextPageUrl)); + final ListExtractor.InfoItemsPage initialPage = extractor.getInitialPage(); + assertFalse("More items available when it shouldn't", initialPage.hasNextPage()); } public static void assertNoDuplicatedItems(StreamingService expectedService, @@ -118,8 +118,9 @@ public final class DefaultTests { } public static 
ListExtractor.InfoItemsPage defaultTestMoreItems(ListExtractor extractor) throws Exception { - assertTrue("Doesn't have more items", extractor.hasNextPage()); - ListExtractor.InfoItemsPage nextPage = extractor.getPage(extractor.getNextPageUrl()); + final ListExtractor.InfoItemsPage initialPage = extractor.getInitialPage(); + assertTrue("Doesn't have more items", initialPage.hasNextPage()); + ListExtractor.InfoItemsPage nextPage = extractor.getPage(initialPage.getNextPage()); final List items = nextPage.getItems(); assertFalse("Next page is empty", items.isEmpty()); assertEmptyErrors("Next page have errors", nextPage.getErrors()); @@ -129,9 +130,9 @@ public final class DefaultTests { } public static void defaultTestGetPageInNewExtractor(ListExtractor extractor, ListExtractor newExtractor) throws Exception { - final String nextPageUrl = extractor.getNextPageUrl(); + final Page nextPage = extractor.getInitialPage().getNextPage(); - final ListExtractor.InfoItemsPage page = newExtractor.getPage(nextPageUrl); + final ListExtractor.InfoItemsPage page = newExtractor.getPage(nextPage); defaultTestListOfItems(extractor.getService(), page.getItems(), page.getErrors()); } } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampPlaylistExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampPlaylistExtractorTest.java index 88836efab..1b1039e6f 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampPlaylistExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampPlaylistExtractorTest.java @@ -143,7 +143,7 @@ public class BandcampPlaylistExtractorTest { @Test public void getNextPageUrl() throws IOException, ExtractionException { - assertNull(extractor.getNextPageUrl()); + assertNull(extractor.getPage(extractor.getInitialPage().getNextPage())); } } } diff --git 
a/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampSearchExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampSearchExtractorTest.java index 532f27a53..7d7655190 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampSearchExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/bandcamp/BandcampSearchExtractorTest.java @@ -5,11 +5,7 @@ package org.schabi.newpipe.extractor.services.bandcamp; import org.junit.BeforeClass; import org.junit.Test; import org.schabi.newpipe.DownloaderTestImpl; -import org.schabi.newpipe.extractor.Extractor; -import org.schabi.newpipe.extractor.InfoItem; -import org.schabi.newpipe.extractor.ListExtractor; -import org.schabi.newpipe.extractor.NewPipe; -import org.schabi.newpipe.extractor.StreamingService; +import org.schabi.newpipe.extractor.*; import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.playlist.PlaylistInfoItem; import org.schabi.newpipe.extractor.search.SearchExtractor; @@ -102,9 +98,11 @@ public class BandcampSearchExtractorTest { // A query practically guaranteed to have the maximum amount of pages SearchExtractor extractor = Bandcamp.getSearchExtractor("e"); - assertEquals("https://bandcamp.com/search?q=e&page=2", extractor.getInitialPage().getNextPageUrl()); + Page page2 = extractor.getInitialPage().getNextPage(); + assertEquals("https://bandcamp.com/search?q=e&page=2", page2.getUrl()); - assertEquals("https://bandcamp.com/search?q=e&page=3", extractor.getPage(extractor.getNextPageUrl()).getNextPageUrl()); + Page page3 = extractor.getPage(page2).getNextPage(); + assertEquals("https://bandcamp.com/search?q=e&page=3", page3.getUrl()); } public static class DefaultTest extends DefaultSearchExtractorTest { diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeCommentsExtractorTest.java 
b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeCommentsExtractorTest.java index 7e4923995..63acab9c6 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeCommentsExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeCommentsExtractorTest.java @@ -5,6 +5,7 @@ import org.junit.Test; import org.schabi.newpipe.DownloaderTestImpl; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.comments.CommentsInfo; import org.schabi.newpipe.extractor.comments.CommentsInfoItem; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -14,78 +15,104 @@ import org.schabi.newpipe.extractor.utils.Utils; import java.io.IOException; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.schabi.newpipe.extractor.ServiceList.PeerTube; public class PeertubeCommentsExtractorTest { + public static class Default { + private static PeertubeCommentsExtractor extractor; - private static PeertubeCommentsExtractor extractor; - - @BeforeClass - public static void setUp() throws Exception { - NewPipe.init(DownloaderTestImpl.getInstance()); - extractor = (PeertubeCommentsExtractor) PeerTube - .getCommentsExtractor("https://framatube.org/videos/watch/04af977f-4201-4697-be67-a8d8cae6fa7a"); - } - - @Test - public void testGetComments() throws IOException, ExtractionException { - boolean result = false; - InfoItemsPage comments = extractor.getInitialPage(); - result = findInComments(comments, "@root A great documentary on a great guy."); - - while (comments.hasNextPage() && !result) { - comments = extractor.getPage(comments.getNextPageUrl()); - result = findInComments(comments, 
"@root A great documentary on a great guy."); + @BeforeClass + public static void setUp() throws Exception { + NewPipe.init(DownloaderTestImpl.getInstance()); + extractor = (PeertubeCommentsExtractor) PeerTube + .getCommentsExtractor("https://framatube.org/videos/watch/04af977f-4201-4697-be67-a8d8cae6fa7a"); } - assertTrue(result); - } + @Test + public void testGetComments() throws IOException, ExtractionException { + InfoItemsPage comments = extractor.getInitialPage(); + boolean result = findInComments(comments, "@root A great documentary on a great guy."); - @Test - public void testGetCommentsFromCommentsInfo() throws IOException, ExtractionException { - boolean result = false; - CommentsInfo commentsInfo = CommentsInfo.getInfo("https://framatube.org/videos/watch/a8ea95b8-0396-49a6-8f30-e25e25fb2828"); - assertEquals("Comments", commentsInfo.getName()); - result = findInComments(commentsInfo.getRelatedItems(), "Loved it!!!"); + while (comments.hasNextPage() && !result) { + comments = extractor.getPage(comments.getNextPage()); + result = findInComments(comments, "@root A great documentary on a great guy."); + } - String nextPage = commentsInfo.getNextPageUrl(); - while (!Utils.isBlank(nextPage) && !result) { - InfoItemsPage moreItems = CommentsInfo.getMoreItems(PeerTube, commentsInfo, nextPage); - result = findInComments(moreItems.getItems(), "Loved it!!!"); - nextPage = moreItems.getNextPageUrl(); + assertTrue(result); } - assertTrue(result); - } + @Test + public void testGetCommentsFromCommentsInfo() throws IOException, ExtractionException { + CommentsInfo commentsInfo = CommentsInfo.getInfo("https://framatube.org/videos/watch/a8ea95b8-0396-49a6-8f30-e25e25fb2828"); + assertEquals("Comments", commentsInfo.getName()); - @Test - public void testGetCommentsAllData() throws IOException, ExtractionException { - InfoItemsPage comments = extractor.getInitialPage(); - for (CommentsInfoItem c : comments.getItems()) { - assertFalse(Utils.isBlank(c.getUploaderUrl())); - 
assertFalse(Utils.isBlank(c.getUploaderName())); - assertFalse(Utils.isBlank(c.getUploaderAvatarUrl())); - assertFalse(Utils.isBlank(c.getCommentId())); - assertFalse(Utils.isBlank(c.getCommentText())); - assertFalse(Utils.isBlank(c.getName())); - assertFalse(Utils.isBlank(c.getTextualUploadDate())); - assertFalse(Utils.isBlank(c.getThumbnailUrl())); - assertFalse(Utils.isBlank(c.getUrl())); - assertFalse(c.getLikeCount() != -1); + boolean result = findInComments(commentsInfo.getRelatedItems(), "Loved it!!!"); + + Page nextPage = commentsInfo.getNextPage(); + InfoItemsPage moreItems = new InfoItemsPage<>(null, nextPage, null); + while (moreItems.hasNextPage() && !result) { + moreItems = CommentsInfo.getMoreItems(PeerTube, commentsInfo, nextPage); + result = findInComments(moreItems.getItems(), "Loved it!!!"); + nextPage = moreItems.getNextPage(); + } + + assertTrue(result); } - } - private boolean findInComments(InfoItemsPage comments, String comment) { - return findInComments(comments.getItems(), comment); - } - - private boolean findInComments(List comments, String comment) { - for (CommentsInfoItem c : comments) { - if (c.getCommentText().contains(comment)) { - return true; + @Test + public void testGetCommentsAllData() throws IOException, ExtractionException { + InfoItemsPage comments = extractor.getInitialPage(); + for (CommentsInfoItem c : comments.getItems()) { + assertFalse(Utils.isBlank(c.getUploaderUrl())); + assertFalse(Utils.isBlank(c.getUploaderName())); + assertFalse(Utils.isBlank(c.getUploaderAvatarUrl())); + assertFalse(Utils.isBlank(c.getCommentId())); + assertFalse(Utils.isBlank(c.getCommentText())); + assertFalse(Utils.isBlank(c.getName())); + assertFalse(Utils.isBlank(c.getTextualUploadDate())); + assertFalse(Utils.isBlank(c.getThumbnailUrl())); + assertFalse(Utils.isBlank(c.getUrl())); + assertFalse(c.getLikeCount() != -1); } } - return false; + + private boolean findInComments(InfoItemsPage comments, String comment) { + return 
findInComments(comments.getItems(), comment); + } + + private boolean findInComments(List comments, String comment) { + for (CommentsInfoItem c : comments) { + if (c.getCommentText().contains(comment)) { + return true; + } + } + return false; + } + } + + public static class DeletedComments { + private static PeertubeCommentsExtractor extractor; + + @BeforeClass + public static void setUp() throws Exception { + NewPipe.init(DownloaderTestImpl.getInstance()); + extractor = (PeertubeCommentsExtractor) PeerTube + .getCommentsExtractor("https://framatube.org/videos/watch/217eefeb-883d-45be-b7fc-a788ad8507d3"); + } + + @Test + public void testGetComments() throws IOException, ExtractionException { + final InfoItemsPage comments = extractor.getInitialPage(); + assertTrue(comments.getErrors().isEmpty()); + } + + @Test + public void testGetCommentsFromCommentsInfo() throws IOException, ExtractionException { + final CommentsInfo commentsInfo = CommentsInfo.getInfo("https://framatube.org/videos/watch/217eefeb-883d-45be-b7fc-a788ad8507d3"); + assertTrue(commentsInfo.getErrors().isEmpty()); + } } } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamExtractorDefaultTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamExtractorDefaultTest.java index dd4c9c449..c3d8c7169 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamExtractorDefaultTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamExtractorDefaultTest.java @@ -161,7 +161,7 @@ public class PeertubeStreamExtractorDefaultTest { @Test public void testGetAgeLimit() throws ExtractionException, IOException { assertEquals(0, extractor.getAgeLimit()); - PeertubeStreamExtractor ageLimit = (PeertubeStreamExtractor) PeerTube.getStreamExtractor("https://peertube.co.uk/videos/watch/0d501633-f2d9-4476-87c6-71f1c02402a4"); + PeertubeStreamExtractor 
ageLimit = (PeertubeStreamExtractor) PeerTube.getStreamExtractor("https://nocensoring.net/videos/embed/dbd8e5e1-c527-49b6-b70c-89101dbb9c08"); ageLimit.fetchPage(); assertEquals(18, ageLimit.getAgeLimit()); } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamLinkHandlerFactoryTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamLinkHandlerFactoryTest.java index af44f6444..f217b8e3a 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamLinkHandlerFactoryTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/PeertubeStreamLinkHandlerFactoryTest.java @@ -26,6 +26,7 @@ public class PeertubeStreamLinkHandlerFactoryTest { public void getId() throws Exception { assertEquals("986aac60-1263-4f73-9ce5-36b18225cb60", linkHandler.fromUrl("https://peertube.mastodon.host/videos/watch/986aac60-1263-4f73-9ce5-36b18225cb60").getId()); assertEquals("986aac60-1263-4f73-9ce5-36b18225cb60", linkHandler.fromUrl("https://peertube.mastodon.host/videos/watch/986aac60-1263-4f73-9ce5-36b18225cb60?fsdafs=fsafa").getId()); + assertEquals("9c9de5e8-0a1e-484a-b099-e80766180a6d", linkHandler.fromUrl("https://framatube.org/videos/embed/9c9de5e8-0a1e-484a-b099-e80766180a6d").getId()); } @@ -33,5 +34,6 @@ public class PeertubeStreamLinkHandlerFactoryTest { public void testAcceptUrl() throws ParsingException { assertTrue(linkHandler.acceptUrl("https://peertube.mastodon.host/videos/watch/986aac60-1263-4f73-9ce5-36b18225cb60")); assertTrue(linkHandler.acceptUrl("https://peertube.mastodon.host/videos/watch/986aac60-1263-4f73-9ce5-36b18225cb60?fsdafs=fsafa")); + assertTrue(linkHandler.acceptUrl("https://framatube.org/videos/embed/9c9de5e8-0a1e-484a-b099-e80766180a6d")); } } \ No newline at end of file diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/search/PeertubeSearchExtractorTest.java 
b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/search/PeertubeSearchExtractorTest.java index 5ea116333..80654a402 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/search/PeertubeSearchExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/peertube/search/PeertubeSearchExtractorTest.java @@ -51,7 +51,7 @@ public class PeertubeSearchExtractorTest { extractor.fetchPage(); final InfoItemsPage page1 = extractor.getInitialPage(); - final InfoItemsPage page2 = extractor.getPage(page1.getNextPageUrl()); + final InfoItemsPage page2 = extractor.getPage(page1.getNextPage()); assertNoDuplicatedItems(PeerTube, page1, page2); } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudPlaylistExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudPlaylistExtractorTest.java index 6a366fd30..de9094c07 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudPlaylistExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudPlaylistExtractorTest.java @@ -268,7 +268,7 @@ public class SoundcloudPlaylistExtractorTest { ListExtractor.InfoItemsPage currentPage = defaultTestMoreItems(extractor); // Test for 2 more levels for (int i = 0; i < 2; i++) { - currentPage = extractor.getPage(currentPage.getNextPageUrl()); + currentPage = extractor.getPage(currentPage.getNextPage()); defaultTestListOfItems(SoundCloud, currentPage.getItems(), currentPage.getErrors()); } } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/search/SoundcloudSearchExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/search/SoundcloudSearchExtractorTest.java index c99bf2a5b..4c3200126 100644 --- 
a/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/search/SoundcloudSearchExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/search/SoundcloudSearchExtractorTest.java @@ -119,7 +119,7 @@ public class SoundcloudSearchExtractorTest { extractor.fetchPage(); final InfoItemsPage page1 = extractor.getInitialPage(); - final InfoItemsPage page2 = extractor.getPage(page1.getNextPageUrl()); + final InfoItemsPage page2 = extractor.getPage(page1.getNextPage()); assertNoDuplicatedItems(SoundCloud, page1, page2); } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeCommentsExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeCommentsExtractorTest.java index a9f6dfa3b..bb2b17bea 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeCommentsExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeCommentsExtractorTest.java @@ -5,6 +5,7 @@ import org.junit.Test; import org.schabi.newpipe.DownloaderTestImpl; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage; import org.schabi.newpipe.extractor.NewPipe; +import org.schabi.newpipe.extractor.Page; import org.schabi.newpipe.extractor.comments.CommentsInfo; import org.schabi.newpipe.extractor.comments.CommentsInfoItem; import org.schabi.newpipe.extractor.exceptions.ExtractionException; @@ -22,39 +23,33 @@ import static org.junit.Assert.assertTrue; import static org.schabi.newpipe.extractor.ServiceList.YouTube; public class YoutubeCommentsExtractorTest { - private static final String urlYT = "https://www.youtube.com/watch?v=D00Au7k3i6o"; private static final String urlInvidious = "https://invidio.us/watch?v=D00Au7k3i6o"; - private static final String urlInvidioush = "https://invidiou.sh/watch?v=D00Au7k3i6o"; private static YoutubeCommentsExtractor extractorYT; private static 
YoutubeCommentsExtractor extractorInvidious; - private static YoutubeCommentsExtractor extractorInvidioush; @BeforeClass public static void setUp() throws Exception { NewPipe.init(DownloaderTestImpl.getInstance()); extractorYT = (YoutubeCommentsExtractor) YouTube .getCommentsExtractor(urlYT); + extractorYT.fetchPage(); extractorInvidious = (YoutubeCommentsExtractor) YouTube .getCommentsExtractor(urlInvidious); - extractorInvidioush = (YoutubeCommentsExtractor) YouTube - .getCommentsExtractor(urlInvidioush); } @Test public void testGetComments() throws IOException, ExtractionException { assertTrue(getCommentsHelper(extractorYT)); assertTrue(getCommentsHelper(extractorInvidious)); - assertTrue(getCommentsHelper(extractorInvidioush)); } private boolean getCommentsHelper(YoutubeCommentsExtractor extractor) throws IOException, ExtractionException { - boolean result; InfoItemsPage comments = extractor.getInitialPage(); - result = findInComments(comments, "s1ck m3m3"); + boolean result = findInComments(comments, "s1ck m3m3"); while (comments.hasNextPage() && !result) { - comments = extractor.getPage(comments.getNextPageUrl()); + comments = extractor.getPage(comments.getNextPage()); result = findInComments(comments, "s1ck m3m3"); } @@ -65,20 +60,21 @@ public class YoutubeCommentsExtractorTest { public void testGetCommentsFromCommentsInfo() throws IOException, ExtractionException { assertTrue(getCommentsFromCommentsInfoHelper(urlYT)); assertTrue(getCommentsFromCommentsInfoHelper(urlInvidious)); - assertTrue(getCommentsFromCommentsInfoHelper(urlInvidioush)); } private boolean getCommentsFromCommentsInfoHelper(String url) throws IOException, ExtractionException { - boolean result = false; CommentsInfo commentsInfo = CommentsInfo.getInfo(url); - result = findInComments(commentsInfo.getRelatedItems(), "s1ck m3m3"); - /* String nextPage = commentsInfo.getNextPageUrl(); - while (!Utils.isBlank(nextPage) && !result) { - InfoItemsPage moreItems = CommentsInfo.getMoreItems(YouTube, 
commentsInfo, nextPage); + assertEquals("Comments", commentsInfo.getName()); + boolean result = findInComments(commentsInfo.getRelatedItems(), "s1ck m3m3"); + + Page nextPage = commentsInfo.getNextPage(); + InfoItemsPage moreItems = new InfoItemsPage<>(null, nextPage, null); + while (moreItems.hasNextPage() && !result) { + moreItems = CommentsInfo.getMoreItems(YouTube, commentsInfo, nextPage); result = findInComments(moreItems.getItems(), "s1ck m3m3"); - nextPage = moreItems.getNextPageUrl(); - }*/ + nextPage = moreItems.getNextPage(); + } return result; } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelperTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelperTest.java index 0a3e007fc..34a300876 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelperTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubeParsingHelperTest.java @@ -5,9 +5,11 @@ import org.junit.Test; import org.schabi.newpipe.DownloaderTestImpl; import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.exceptions.ExtractionException; +import org.schabi.newpipe.extractor.exceptions.ParsingException; import java.io.IOException; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; public class YoutubeParsingHelperTest { @@ -27,4 +29,11 @@ public class YoutubeParsingHelperTest { assertTrue("Hardcoded YouTube Music keys are not valid anymore", YoutubeParsingHelper.areHardcodedYoutubeMusicKeysValid()); } + + @Test + public void testParseDurationString() throws ParsingException { + assertEquals(1162567, YoutubeParsingHelper.parseDurationString("12:34:56:07")); + assertEquals(4445767, YoutubeParsingHelper.parseDurationString("1,234:56:07")); + assertEquals(754, YoutubeParsingHelper.parseDurationString("12:34 ")); + } } diff --git 
a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubePlaylistExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubePlaylistExtractorTest.java index f2e15a3fa..2b03579f9 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubePlaylistExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/YoutubePlaylistExtractorTest.java @@ -209,7 +209,7 @@ public class YoutubePlaylistExtractorTest { // test for 2 more levels for (int i = 0; i < 2; i++) { - currentPage = extractor.getPage(currentPage.getNextPageUrl()); + currentPage = extractor.getPage(currentPage.getNextPage()); defaultTestListOfItems(YouTube, currentPage.getItems(), currentPage.getErrors()); } } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/search/YoutubeSearchExtractorTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/search/YoutubeSearchExtractorTest.java index 518796fb2..6e07b0211 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/search/YoutubeSearchExtractorTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/search/YoutubeSearchExtractorTest.java @@ -19,8 +19,9 @@ import static org.junit.Assert.assertTrue; import static org.schabi.newpipe.extractor.ExtractorAsserts.assertEmptyErrors; import static org.schabi.newpipe.extractor.ServiceList.YouTube; import static org.schabi.newpipe.extractor.services.DefaultTests.assertNoDuplicatedItems; -import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.*; -import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty; +import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.CHANNELS; +import static 
org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.PLAYLISTS; +import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.VIDEOS; public class YoutubeSearchExtractorTest { public static class All extends DefaultSearchExtractorTest { @@ -186,15 +187,14 @@ public class YoutubeSearchExtractorTest { @Test public void testMoreRelatedItems() throws Exception { + final ListExtractor.InfoItemsPage initialPage = extractor().getInitialPage(); // YouTube actually gives us an empty next page, but after that, no more pages. - assertTrue(extractor.hasNextPage()); - final ListExtractor.InfoItemsPage nextEmptyPage = extractor.getPage(extractor.getNextPageUrl()); + assertTrue(initialPage.hasNextPage()); + final ListExtractor.InfoItemsPage nextEmptyPage = extractor.getPage(initialPage.getNextPage()); assertEquals(0, nextEmptyPage.getItems().size()); assertEmptyErrors("Empty page has errors", nextEmptyPage.getErrors()); assertFalse("More items available when it shouldn't", nextEmptyPage.hasNextPage()); - final String nextPageUrl = nextEmptyPage.getNextPageUrl(); - assertTrue("Next page is not empty or null", isNullOrEmpty(nextPageUrl)); } } @@ -206,7 +206,7 @@ public class YoutubeSearchExtractorTest { extractor.fetchPage(); final ListExtractor.InfoItemsPage page1 = extractor.getInitialPage(); - final ListExtractor.InfoItemsPage page2 = extractor.getPage(page1.getNextPageUrl()); + final ListExtractor.InfoItemsPage page2 = extractor.getPage(page1.getNextPage()); assertNoDuplicatedItems(YouTube, page1, page2); } diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorAgeRestrictedTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorAgeRestrictedTest.java index 3710b4ee1..95c8aeb43 100644 --- 
a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorAgeRestrictedTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorAgeRestrictedTest.java @@ -48,6 +48,8 @@ public class YoutubeStreamExtractorAgeRestrictedTest { public void testGetValidTimeStamp() throws IOException, ExtractionException { StreamExtractor extractor = YouTube.getStreamExtractor("https://youtu.be/FmG385_uUys?t=174"); assertEquals(extractor.getTimeStamp() + "", "174"); + extractor = YouTube.getStreamExtractor("https://youtube.com/embed/FmG385_uUys?start=174"); + assertEquals(extractor.getTimeStamp() + "", "174"); } @Test diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorUnlistedTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorUnlistedTest.java index 5dcd73ecd..49a851f4f 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorUnlistedTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/services/youtube/stream/YoutubeStreamExtractorUnlistedTest.java @@ -129,11 +129,6 @@ public class YoutubeStreamExtractorUnlistedTest { assertSame(StreamType.VIDEO_STREAM, extractor.getStreamType()); } - @Test - public void testGetNextVideo() throws ExtractionException { - assertNull(extractor.getNextStream()); - } - @Test public void testGetRelatedVideos() throws ExtractionException { StreamInfoItemsCollector relatedVideos = extractor.getRelatedStreams(); diff --git a/extractor/src/test/java/org/schabi/newpipe/extractor/utils/UtilsTest.java b/extractor/src/test/java/org/schabi/newpipe/extractor/utils/UtilsTest.java index 578867445..5b0dfdb3d 100644 --- a/extractor/src/test/java/org/schabi/newpipe/extractor/utils/UtilsTest.java +++ b/extractor/src/test/java/org/schabi/newpipe/extractor/utils/UtilsTest.java @@ -1,18 
+1,24 @@ package org.schabi.newpipe.extractor.utils; -import com.grack.nanojson.JsonParserException; import org.junit.Test; import org.schabi.newpipe.extractor.exceptions.ParsingException; +import java.util.Arrays; + import static org.junit.Assert.assertEquals; public class UtilsTest { @Test - public void testMixedNumberWordToLong() throws JsonParserException, ParsingException { + public void testMixedNumberWordToLong() throws ParsingException { assertEquals(10, Utils.mixedNumberWordToLong("10")); assertEquals(10.5e3, Utils.mixedNumberWordToLong("10.5K"), 0.0); assertEquals(10.5e6, Utils.mixedNumberWordToLong("10.5M"), 0.0); assertEquals(10.5e6, Utils.mixedNumberWordToLong("10,5M"), 0.0); assertEquals(1.5e9, Utils.mixedNumberWordToLong("1,5B"), 0.0); } + + @Test + public void testJoin() { + assertEquals("some,random,stuff", Utils.join(",", Arrays.asList("some", "random", "stuff"))); + } }