Introduce Page class

wb9688 2020-04-15 14:09:46 +02:00
parent e3bfdba135
commit 4cc312086a
43 changed files with 477 additions and 429 deletions

View File

@ -4,14 +4,14 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.utils.Utils;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
/**
* Base class for extractors that have a list (e.g. playlists, users).
*/
@ -37,8 +37,8 @@ public abstract class ListExtractor<R extends InfoItem> extends Extractor {
}
/**
* A {@link InfoItemsPage InfoItemsPage} corresponding to the initial page where the items are from the initial request and
* the nextPageUrl relative to it.
* A {@link InfoItemsPage InfoItemsPage} corresponding to the initial page
* where the items come from the initial request, together with the nextPage relative to it.
*
* @return a {@link InfoItemsPage} corresponding to the initial page
*/
@ -48,11 +48,11 @@ public abstract class ListExtractor<R extends InfoItem> extends Extractor {
/**
* Get a list of items corresponding to the specific requested page.
*
* @param pageUrl any page url got from the exclusive implementation of the list extractor
* @param page any page got from the exclusive implementation of the list extractor
* @return a {@link InfoItemsPage} corresponding to the requested page
* @see InfoItemsPage#getNextPageUrl()
* @see InfoItemsPage#getNextPage()
*/
public abstract InfoItemsPage<R> getPage(final String pageUrl) throws IOException, ExtractionException;
public abstract InfoItemsPage<R> getPage(final Page page) throws IOException, ExtractionException;
@Override
public ListLinkHandler getLinkHandler() {
@ -65,23 +65,22 @@ public abstract class ListExtractor<R extends InfoItem> extends Extractor {
/**
* A class that is used to wrap a list of gathered items and any errors that occurred; it
* also contains a field that points to the next available page ({@link #nextPageUrl}).
* also contains a field that points to the next available page ({@link #nextPage}).
*/
public static class InfoItemsPage<T extends InfoItem> {
private static final InfoItemsPage<InfoItem> EMPTY =
new InfoItemsPage<>(Collections.<InfoItem>emptyList(), "", Collections.<Throwable>emptyList());
new InfoItemsPage<>(Collections.<InfoItem>emptyList(), null, Collections.<Throwable>emptyList());
/**
* A convenience method that returns a representation of an empty page.
*
* @return a type-safe page with the list of items and errors empty and the nextPageUrl set to an empty string.
* @return a type-safe page with the list of items and errors empty and the nextPage set to {@code null}.
*/
public static <T extends InfoItem> InfoItemsPage<T> emptyPage() {
//noinspection unchecked
return (InfoItemsPage<T>) EMPTY;
}
/**
* The current list of items of this page
*/
@ -90,35 +89,37 @@ public abstract class ListExtractor<R extends InfoItem> extends Extractor {
/**
* Page pointing to the next page relative to this one
*
* @see ListExtractor#getPage(String)
* @see ListExtractor#getPage(Page)
* @see Page
*/
private final String nextPageUrl;
private final Page nextPage;
/**
* Errors that happened during the extraction
*/
private final List<Throwable> errors;
public InfoItemsPage(InfoItemsCollector<T, ?> collector, String nextPageUrl) {
this(collector.getItems(), nextPageUrl, collector.getErrors());
public InfoItemsPage(InfoItemsCollector<T, ?> collector, Page nextPage) {
this(collector.getItems(), nextPage, collector.getErrors());
}
public InfoItemsPage(List<T> itemsList, String nextPageUrl, List<Throwable> errors) {
public InfoItemsPage(List<T> itemsList, Page nextPage, List<Throwable> errors) {
this.itemsList = itemsList;
this.nextPageUrl = nextPageUrl;
this.nextPage = nextPage;
this.errors = errors;
}
public boolean hasNextPage() {
return !isNullOrEmpty(nextPageUrl);
return nextPage != null && (!isNullOrEmpty(nextPage.getUrl())
|| !isNullOrEmpty(nextPage.getIds()));
}
public List<T> getItems() {
return itemsList;
}
public String getNextPageUrl() {
return nextPageUrl;
public Page getNextPage() {
return nextPage;
}
public List<Throwable> getErrors() {

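Taken together, the changes above replace the string-based nextPageUrl round trip with a Page object. A minimal consumer sketch, not part of this diff: `service` and `url` are assumed to exist, and checked exceptions are left unhandled.

final ListExtractor<StreamInfoItem> extractor = service.getChannelExtractor(url);
extractor.fetchPage();

InfoItemsPage<StreamInfoItem> currentPage = extractor.getInitialPage();
final List<StreamInfoItem> items = new ArrayList<>(currentPage.getItems());

while (currentPage.hasNextPage()) {
    // getNextPage() now returns a Page (URL, ids and/or cookies) instead of a bare URL
    currentPage = extractor.getPage(currentPage.getNextPage());
    items.addAll(currentPage.getItems());
}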
View File

@ -8,7 +8,7 @@ import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public abstract class ListInfo<T extends InfoItem> extends Info {
private List<T> relatedItems;
private String nextPageUrl = null;
private Page nextPage = null;
private final List<String> contentFilters;
private final String sortFilter;
@ -39,15 +39,16 @@ public abstract class ListInfo<T extends InfoItem> extends Info {
}
public boolean hasNextPage() {
return !isNullOrEmpty(nextPageUrl);
return nextPage != null && (!isNullOrEmpty(nextPage.getUrl())
|| !isNullOrEmpty(nextPage.getIds()));
}
public String getNextPageUrl() {
return nextPageUrl;
public Page getNextPage() {
return nextPage;
}
public void setNextPageUrl(String pageUrl) {
this.nextPageUrl = pageUrl;
public void setNextPage(Page page) {
this.nextPage = page;
}
public List<String> getContentFilters() {

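The same null/emptiness check now appears in both ListExtractor.InfoItemsPage and ListInfo. A standalone mirror of that check with illustrative inputs, for clarity only:

static boolean hasNext(final Page nextPage) {
    return nextPage != null && (!isNullOrEmpty(nextPage.getUrl())
            || !isNullOrEmpty(nextPage.getIds()));
}

// hasNext(null)                                  -> false: no next page at all
// hasNext(new Page(""))                          -> false: blank URL, no ids
// hasNext(new Page("https://example.org/next"))  -> true:  URL-based paging
// hasNext(new Page(Arrays.asList("0000000042"))) -> true:  id-based paging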
View File

@ -0,0 +1,45 @@
package org.schabi.newpipe.extractor;
import java.io.Serializable;
import java.util.List;
import java.util.Map;
public class Page implements Serializable {
private final String url;
private final List<String> ids;
private final Map<String, String> cookies;
public Page(final String url, final List<String> ids, final Map<String, String> cookies) {
this.url = url;
this.ids = ids;
this.cookies = cookies;
}
public Page(final String url) {
this(url, null, null);
}
public Page(final String url, final Map<String, String> cookies) {
this(url, null, cookies);
}
public Page(final List<String> ids) {
this(null, ids, null);
}
public Page(final List<String> ids, final Map<String, String> cookies) {
this(null, ids, cookies);
}
public String getUrl() {
return url;
}
public List<String> getIds() {
return ids;
}
public Map<String, String> getCookies() {
return cookies;
}
}
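Page bundles everything a service may need to fetch the next batch: a URL, a list of ids, and cookies; it implements Serializable, presumably so front ends can persist pagination state. A sketch of the typical constructions, with all values invented for illustration:

// All values below are hypothetical.
final Page byUrl = new Page("https://example.org/api/videos?start=12&count=12");
final Page byIds = new Page(Arrays.asList("0000000123", "0000000456"));
final Page withCookies = new Page("https://example.org/api/next",
        Collections.singletonMap("session", "abc123"));

// Extractors read back whichever fields their service filled in:
assert byUrl.getUrl() != null;
assert byUrl.getIds() == null; // URL-based page, no id list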

View File

@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.channel;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
@ -49,8 +50,8 @@ public class ChannelInfo extends ListInfo<StreamInfoItem> {
public static InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService service,
String url,
String pageUrl) throws IOException, ExtractionException {
return service.getChannelExtractor(url).getPage(pageUrl);
Page page) throws IOException, ExtractionException {
return service.getChannelExtractor(url).getPage(page);
}
public static ChannelInfo getInfo(ChannelExtractor extractor) throws IOException, ExtractionException {
@ -81,7 +82,7 @@ public class ChannelInfo extends ListInfo<StreamInfoItem> {
final InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
info.setNextPage(itemsPage.getNextPage());
try {
info.setSubscriberCount(extractor.getSubscriberCount());

View File

@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.comments;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
@ -39,23 +40,23 @@ public class CommentsInfo extends ListInfo<CommentsInfoItem> {
InfoItemsPage<CommentsInfoItem> initialCommentsPage = ExtractorHelper.getItemsPageOrLogError(commentsInfo,
commentsExtractor);
commentsInfo.setRelatedItems(initialCommentsPage.getItems());
commentsInfo.setNextPageUrl(initialCommentsPage.getNextPageUrl());
commentsInfo.setNextPage(initialCommentsPage.getNextPage());
return commentsInfo;
}
public static InfoItemsPage<CommentsInfoItem> getMoreItems(CommentsInfo commentsInfo, String pageUrl)
public static InfoItemsPage<CommentsInfoItem> getMoreItems(CommentsInfo commentsInfo, Page page)
throws ExtractionException, IOException {
return getMoreItems(NewPipe.getService(commentsInfo.getServiceId()), commentsInfo, pageUrl);
return getMoreItems(NewPipe.getService(commentsInfo.getServiceId()), commentsInfo, page);
}
public static InfoItemsPage<CommentsInfoItem> getMoreItems(StreamingService service, CommentsInfo commentsInfo,
String pageUrl) throws IOException, ExtractionException {
Page page) throws IOException, ExtractionException {
if (null == commentsInfo.getCommentsExtractor()) {
commentsInfo.setCommentsExtractor(service.getCommentsExtractor(commentsInfo.getUrl()));
commentsInfo.getCommentsExtractor().fetchPage();
}
return commentsInfo.getCommentsExtractor().getPage(pageUrl);
return commentsInfo.getCommentsExtractor().getPage(page);
}
private transient CommentsExtractor commentsExtractor;

View File

@ -45,7 +45,7 @@ public class FeedInfo extends ListInfo<StreamInfoItem> {
final InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
info.setNextPage(itemsPage.getNextPage());
return info;
}

View File

@ -23,6 +23,7 @@ package org.schabi.newpipe.extractor.kiosk;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
@ -33,18 +34,17 @@ import org.schabi.newpipe.extractor.utils.ExtractorHelper;
import java.io.IOException;
public class KioskInfo extends ListInfo<StreamInfoItem> {
private KioskInfo(int serviceId, ListLinkHandler linkHandler, String name) throws ParsingException {
super(serviceId, linkHandler, name);
}
public static ListExtractor.InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService service,
String url,
String pageUrl)
Page page)
throws IOException, ExtractionException {
KioskList kl = service.getKioskList();
KioskExtractor extractor = kl.getExtractorByUrl(url, pageUrl);
return extractor.getPage(pageUrl);
KioskExtractor extractor = kl.getExtractorByUrl(url, page);
return extractor.getPage(page);
}
public static KioskInfo getInfo(String url) throws IOException, ExtractionException {
@ -71,7 +71,7 @@ public class KioskInfo extends ListInfo<StreamInfoItem> {
final ListExtractor.InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
info.setNextPage(itemsPage.getNextPage());
return info;
}

View File

@ -1,6 +1,7 @@
package org.schabi.newpipe.extractor.kiosk;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
@ -59,23 +60,23 @@ public class KioskList {
public KioskExtractor getDefaultKioskExtractor()
throws ExtractionException, IOException {
return getDefaultKioskExtractor("");
return getDefaultKioskExtractor(null);
}
public KioskExtractor getDefaultKioskExtractor(String nextPageUrl)
public KioskExtractor getDefaultKioskExtractor(Page nextPage)
throws ExtractionException, IOException {
return getDefaultKioskExtractor(nextPageUrl, NewPipe.getPreferredLocalization());
return getDefaultKioskExtractor(nextPage, NewPipe.getPreferredLocalization());
}
public KioskExtractor getDefaultKioskExtractor(String nextPageUrl, Localization localization)
public KioskExtractor getDefaultKioskExtractor(Page nextPage, Localization localization)
throws ExtractionException, IOException {
if (defaultKiosk != null && !defaultKiosk.equals("")) {
return getExtractorById(defaultKiosk, nextPageUrl, localization);
return getExtractorById(defaultKiosk, nextPage, localization);
} else {
if (!kioskList.isEmpty()) {
// if no default kiosk was set, use any entry
Object[] keySet = kioskList.keySet().toArray();
return getExtractorById(keySet[0].toString(), nextPageUrl, localization);
return getExtractorById(keySet[0].toString(), nextPage, localization);
} else {
return null;
}
@ -86,12 +87,12 @@ public class KioskList {
return defaultKiosk;
}
public KioskExtractor getExtractorById(String kioskId, String nextPageUrl)
public KioskExtractor getExtractorById(String kioskId, Page nextPage)
throws ExtractionException, IOException {
return getExtractorById(kioskId, nextPageUrl, NewPipe.getPreferredLocalization());
return getExtractorById(kioskId, nextPage, NewPipe.getPreferredLocalization());
}
public KioskExtractor getExtractorById(String kioskId, String nextPageUrl, Localization localization)
public KioskExtractor getExtractorById(String kioskId, Page nextPage, Localization localization)
throws ExtractionException, IOException {
KioskEntry ke = kioskList.get(kioskId);
if (ke == null) {
@ -111,17 +112,17 @@ public class KioskList {
return kioskList.keySet();
}
public KioskExtractor getExtractorByUrl(String url, String nextPageUrl)
public KioskExtractor getExtractorByUrl(String url, Page nextPage)
throws ExtractionException, IOException {
return getExtractorByUrl(url, nextPageUrl, NewPipe.getPreferredLocalization());
return getExtractorByUrl(url, nextPage, NewPipe.getPreferredLocalization());
}
public KioskExtractor getExtractorByUrl(String url, String nextPageUrl, Localization localization)
public KioskExtractor getExtractorByUrl(String url, Page nextPage, Localization localization)
throws ExtractionException, IOException {
for (Map.Entry<String, KioskEntry> e : kioskList.entrySet()) {
KioskEntry ke = e.getValue();
if (ke.handlerFactory.acceptUrl(url)) {
return getExtractorById(ke.handlerFactory.getId(url), nextPageUrl, localization);
return getExtractorById(ke.handlerFactory.getId(url), nextPage, localization);
}
}
throw new ExtractionException("Could not find a kiosk that fits to the url: " + url);

View File

@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.playlist;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
@ -32,8 +33,8 @@ public class PlaylistInfo extends ListInfo<StreamInfoItem> {
public static InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService service,
String url,
String pageUrl) throws IOException, ExtractionException {
return service.getPlaylistExtractor(url).getPage(pageUrl);
Page page) throws IOException, ExtractionException {
return service.getPlaylistExtractor(url).getPage(page);
}
/**
@ -112,7 +113,7 @@ public class PlaylistInfo extends ListInfo<StreamInfoItem> {
final InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
info.setNextPage(itemsPage.getNextPage());
return info;
}

View File

@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.search;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
@ -10,9 +11,7 @@ import org.schabi.newpipe.extractor.utils.ExtractorHelper;
import java.io.IOException;
public class SearchInfo extends ListInfo<InfoItem> {
private String searchString;
private String searchSuggestion;
private boolean isCorrectedSearch;
@ -55,7 +54,7 @@ public class SearchInfo extends ListInfo<InfoItem> {
ListExtractor.InfoItemsPage<InfoItem> page = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(page.getItems());
info.setNextPageUrl(page.getNextPageUrl());
info.setNextPage(page.getNextPage());
return info;
}
@ -63,9 +62,9 @@ public class SearchInfo extends ListInfo<InfoItem> {
public static ListExtractor.InfoItemsPage<InfoItem> getMoreItems(StreamingService service,
SearchQueryHandler query,
String pageUrl)
Page page)
throws IOException, ExtractionException {
return service.getSearchExtractor(query).getPage(pageUrl);
return service.getSearchExtractor(query).getPage(page);
}
// Getter

View File

@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -79,8 +80,9 @@ public class MediaCCCConferenceExtractor extends ChannelExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) {
return null;
public InfoItemsPage<StreamInfoItem> getPage(final Page page) {
return InfoItemsPage.emptyPage();
}
@Override

View File

@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
@ -37,11 +38,12 @@ public class MediaCCCConferenceKiosk extends KioskExtractor<ChannelInfoItem> {
collector.commit(new MediaCCCConferenceInfoItemExtractor(conferences.getObject(i)));
}
return new InfoItemsPage<>(collector, "");
return new InfoItemsPage<>(collector, null);
}
@Override
public InfoItemsPage<ChannelInfoItem> getPage(final String pageUrl) {
public InfoItemsPage<ChannelInfoItem> getPage(final Page page) {
return InfoItemsPage.emptyPage();
}

View File

@ -6,6 +6,7 @@ import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor;
@ -80,7 +81,7 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
}
@Override
public InfoItemsPage<InfoItem> getPage(final String pageUrl) {
public InfoItemsPage<InfoItem> getPage(final Page page) {
return InfoItemsPage.emptyPage();
}

View File

@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services.peertube;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.utils.Parser;
@ -14,7 +15,6 @@ import java.util.Date;
import java.util.TimeZone;
public class PeertubeParsingHelper {
public static final String START_KEY = "start";
public static final String COUNT_KEY = "count";
public static final int ITEMS_PER_PAGE = 12;
@ -23,17 +23,17 @@ public class PeertubeParsingHelper {
private PeertubeParsingHelper() {
}
public static void validate(JsonObject json) throws ContentNotAvailableException {
String error = json.getString("error");
public static void validate(final JsonObject json) throws ContentNotAvailableException {
final String error = json.getString("error");
if (!Utils.isBlank(error)) {
throw new ContentNotAvailableException(error);
}
}
public static Calendar parseDateFrom(String textualUploadDate) throws ParsingException {
Date date;
public static Calendar parseDateFrom(final String textualUploadDate) throws ParsingException {
final Date date;
try {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
date = sdf.parse(textualUploadDate);
} catch (ParseException e) {
@ -45,26 +45,25 @@ public class PeertubeParsingHelper {
return uploadDate;
}
public static String getNextPageUrl(String prevPageUrl, long total) {
String prevStart;
public static Page getNextPage(final String prevPageUrl, final long total) {
final String prevStart;
try {
prevStart = Parser.matchGroup1(START_PATTERN, prevPageUrl);
} catch (Parser.RegexException e) {
return "";
return null;
}
if (Utils.isBlank(prevStart)) return "";
long nextStart = 0;
if (Utils.isBlank(prevStart)) return null;
final long nextStart;
try {
nextStart = Long.parseLong(prevStart) + ITEMS_PER_PAGE;
} catch (NumberFormatException e) {
return "";
return null;
}
if (nextStart >= total) {
return "";
return null;
} else {
return prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + nextStart);
return new Page(prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + nextStart));
}
}
}

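In other words, the helper now bumps the start offset by ITEMS_PER_PAGE and signals exhaustion with null instead of an empty string. A worked example with invented URLs, assuming START_PATTERN captures the value of the start parameter:

final Page next = PeertubeParsingHelper.getNextPage(
        "https://example.org/api/v1/videos?start=12&count=12", 30);
// next.getUrl() -> "https://example.org/api/v1/videos?start=24&count=12"

final Page done = PeertubeParsingHelper.getNextPage(
        "https://example.org/api/v1/videos?start=12&count=12", 24);
// done == null: the next offset (24) would reach the total, so paging stops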
View File

@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -87,15 +88,15 @@ public class PeertubeAccountExtractor extends ChannelExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
final String pageUrl = getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
return getPage(pageUrl);
return getPage(new Page(pageUrl));
}
private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonObject json) throws ParsingException {
private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonObject json) throws ParsingException {
final JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
} catch (Exception e) {
throw new ParsingException("unable to extract channel streams", e);
throw new ParsingException("Unable to extract account streams", e);
}
for (final Object c : contents) {
@ -108,27 +109,28 @@ public class PeertubeAccountExtractor extends ChannelExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
final Response response = getDownloader().get(pageUrl);
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for kiosk info", e);
throw new ParsingException("Could not parse json data for account info", e);
}
}
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final long total;
if (json != null) {
PeertubeParsingHelper.validate(json);
total = JsonUtils.getNumber(json, "total").longValue();
final long total = JsonUtils.getNumber(json, "total").longValue();
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json);
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));
} else {
throw new ExtractionException("Unable to get PeerTube kiosk info");
throw new ExtractionException("Unable to get PeerTube account info");
}
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total));
}
@Override
@ -137,7 +139,7 @@ public class PeertubeAccountExtractor extends ChannelExtractor {
if (response != null && response.responseBody() != null) {
setInitialData(response.responseBody());
} else {
throw new ExtractionException("Unable to extract PeerTube channel data");
throw new ExtractionException("Unable to extract PeerTube account data");
}
}
@ -145,9 +147,9 @@ public class PeertubeAccountExtractor extends ChannelExtractor {
try {
json = JsonParser.object().from(responseBody);
} catch (JsonParserException e) {
throw new ExtractionException("Unable to extract PeerTube channel data", e);
throw new ExtractionException("Unable to extract PeerTube account data", e);
}
if (json == null) throw new ExtractionException("Unable to extract PeerTube channel data");
if (json == null) throw new ExtractionException("Unable to extract PeerTube account data");
}
@Override

View File

@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -93,7 +94,7 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
final String pageUrl = getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
return getPage(pageUrl);
return getPage(new Page(pageUrl));
}
private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonObject json) throws ParsingException {
@ -101,7 +102,7 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
} catch (Exception e) {
throw new ParsingException("unable to extract channel streams", e);
throw new ParsingException("Unable to extract channel streams", e);
}
for (final Object c : contents) {
@ -114,27 +115,28 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
final Response response = getDownloader().get(pageUrl);
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for kiosk info", e);
throw new ParsingException("Could not parse json data for channel info", e);
}
}
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final long total;
if (json != null) {
PeertubeParsingHelper.validate(json);
total = JsonUtils.getNumber(json, "total").longValue();
final long total = JsonUtils.getNumber(json, "total").longValue();
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json);
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));
} else {
throw new ExtractionException("Unable to get PeerTube kiosk info");
throw new ExtractionException("Unable to get PeerTube channel info");
}
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total));
}
@Override

View File

@ -4,6 +4,7 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.comments.CommentsInfoItem;
@ -24,8 +25,6 @@ import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelp
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
public class PeertubeCommentsExtractor extends CommentsExtractor {
private long total;
public PeertubeCommentsExtractor(final StreamingService service, final ListLinkHandler uiHandler) {
super(service, uiHandler);
}
@ -33,7 +32,7 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
@Override
public InfoItemsPage<CommentsInfoItem> getInitialPage() throws IOException, ExtractionException {
final String pageUrl = getUrl() + "?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
return getPage(pageUrl);
return getPage(new Page(pageUrl));
}
private void collectCommentsFrom(final CommentsInfoItemsCollector collector, final JsonObject json) throws ParsingException {
@ -51,8 +50,8 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
}
@Override
public InfoItemsPage<CommentsInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
final Response response = getDownloader().get(pageUrl);
public InfoItemsPage<CommentsInfoItem> getPage(final Page page) throws IOException, ExtractionException {
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
@ -62,16 +61,19 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
}
}
final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
if (json != null) {
final Number number = JsonUtils.getNumber(json, "total");
if (number != null) this.total = number.longValue();
PeertubeParsingHelper.validate(json);
final long total = JsonUtils.getNumber(json, "total").longValue();
final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
collectCommentsFrom(collector, json);
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));
} else {
throw new ExtractionException("Unable to get peertube comments info");
throw new ExtractionException("Unable to get PeerTube kiosk info");
}
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total));
}
public void onFetchPage(final Downloader downloader) throws IOException, ExtractionException { }
@Override
public void onFetchPage(Downloader downloader) { }
}

View File

@ -5,6 +5,7 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
@ -16,6 +17,7 @@ import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Utils;
import java.io.IOException;
@ -27,7 +29,6 @@ import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelp
public class PeertubePlaylistExtractor extends PlaylistExtractor {
private JsonObject playlistInfo;
private String initialPageUrl;
public PeertubePlaylistExtractor(final StreamingService service, final ListLinkHandler linkHandler) {
super(service, linkHandler);
@ -84,31 +85,50 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor {
@Nonnull
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
return getPage(initialPageUrl);
return getPage(new Page(getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE));
}
private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonObject json) throws ParsingException {
final JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
} catch (Exception e) {
throw new ParsingException("Unable to extract playlist streams", e);
}
final String baseUrl = getBaseUrl();
for (final Object c : contents) {
if (c instanceof JsonObject) {
final JsonObject item = (JsonObject) c;
final PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl);
collector.commit(extractor);
}
}
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
final Response response = getDownloader().get(pageUrl);
final JsonObject playlistVideos;
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
playlistVideos = JsonParser.object().from(response.responseBody());
} catch (JsonParserException jpe) {
throw new ExtractionException("Could not parse json", jpe);
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for playlist info", e);
}
}
PeertubeParsingHelper.validate(playlistVideos);
final long total = JsonUtils.getNumber(playlistVideos, "total").longValue();
if (json != null) {
PeertubeParsingHelper.validate(json);
final long total = JsonUtils.getNumber(json, "total").longValue();
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json);
final JsonArray videos = playlistVideos.getArray("data");
for (final Object o : videos) {
final JsonObject video = ((JsonObject) o).getObject("video");
collector.commit(new PeertubeStreamInfoItemExtractor(video, getBaseUrl()));
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));
} else {
throw new ExtractionException("Unable to get PeerTube playlist info");
}
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total));
}
@Override
@ -120,7 +140,6 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor {
throw new ExtractionException("Could not parse json", jpe);
}
PeertubeParsingHelper.validate(playlistInfo);
initialPageUrl = getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
}
@Nonnull

View File

@ -5,8 +5,7 @@ import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemExtractor;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
@ -46,12 +45,10 @@ public class PeertubeSearchExtractor extends SearchExtractor {
@Override
public InfoItemsPage<InfoItem> getInitialPage() throws IOException, ExtractionException {
final String pageUrl = getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
return getPage(pageUrl);
return getPage(new Page(pageUrl));
}
private InfoItemsCollector<InfoItem, InfoItemExtractor> collectStreamsFrom(final JsonObject json) throws ParsingException {
final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
private void collectStreamsFrom(final InfoItemsSearchCollector collector, final JsonObject json) throws ParsingException {
final JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
@ -67,13 +64,11 @@ public class PeertubeSearchExtractor extends SearchExtractor {
collector.commit(extractor);
}
}
return collector;
}
@Override
public InfoItemsPage<InfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
final Response response = getDownloader().get(pageUrl);
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
@ -84,10 +79,15 @@ public class PeertubeSearchExtractor extends SearchExtractor {
}
if (json != null) {
PeertubeParsingHelper.validate(json);
final long total = JsonUtils.getNumber(json, "total").longValue();
return new InfoItemsPage<>(collectStreamsFrom(json), PeertubeParsingHelper.getNextPageUrl(pageUrl, total));
final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
collectStreamsFrom(collector, json);
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));
} else {
throw new ExtractionException("Unable to get peertube search info");
throw new ExtractionException("Unable to get PeerTube search info");
}
}

View File

@ -341,7 +341,7 @@ public class PeertubeStreamExtractor extends StreamExtractor {
if (response != null && response.responseBody() != null) {
setInitialData(response.responseBody());
} else {
throw new ExtractionException("Unable to extract peertube channel data");
throw new ExtractionException("Unable to extract PeerTube channel data");
}
loadSubtitles();
@ -351,9 +351,9 @@ public class PeertubeStreamExtractor extends StreamExtractor {
try {
json = JsonParser.object().from(responseBody);
} catch (JsonParserException e) {
throw new ExtractionException("Unable to extract peertube stream data", e);
throw new ExtractionException("Unable to extract PeerTube stream data", e);
}
if (json == null) throw new ExtractionException("Unable to extract peertube stream data");
if (json == null) throw new ExtractionException("Unable to extract PeerTube stream data");
PeertubeParsingHelper.validate(json);
}

View File

@ -4,6 +4,7 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
@ -19,8 +20,6 @@ import org.schabi.newpipe.extractor.utils.Utils;
import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
@ -38,7 +37,7 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
final String pageUrl = getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
return getPage(pageUrl);
return getPage(new Page(pageUrl));
}
private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonObject json) throws ParsingException {
@ -60,8 +59,8 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
final Response response = getDownloader().get(pageUrl);
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
@ -71,18 +70,19 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
}
}
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final long total;
if (json != null) {
final Number number = JsonUtils.getNumber(json, "total");
total = number.longValue();
PeertubeParsingHelper.validate(json);
final long total = JsonUtils.getNumber(json, "total").longValue();
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json);
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));
} else {
throw new ExtractionException("Unable to get peertube kiosk info");
throw new ExtractionException("Unable to get PeerTube kiosk info");
}
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPageUrl(pageUrl, total));
}
@Override
public void onFetchPage(@Nonnull final Downloader downloader) throws IOException, ExtractionException { }
public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { }
}

View File

@ -4,6 +4,7 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -109,21 +110,17 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, streamInfoItemsCollector, apiUrl);
return new InfoItemsPage<>(streamInfoItemsCollector, nextPageUrl);
return new InfoItemsPage<>(streamInfoItemsCollector, new Page(nextPageUrl));
} catch (Exception e) {
throw new ExtractionException("Could not get next page", e);
}
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl);
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, page.getUrl());
return new InfoItemsPage<>(collector, nextPageUrl);
return new InfoItemsPage<>(collector, new Page(nextPageUrl));
}
}

View File

@ -1,5 +1,6 @@
package org.schabi.newpipe.extractor.services.soundcloud.extractors;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -33,19 +34,11 @@ public class SoundcloudChartsExtractor extends KioskExtractor<StreamInfoItem> {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
public InfoItemsPage<StreamInfoItem> getPage(Page page) throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, pageUrl, true);
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, page.getUrl(), true);
return new InfoItemsPage<>(collector, nextPageUrl);
}
private void computeNextPageAndStreams() throws IOException, ExtractionException {
return new InfoItemsPage<>(collector, new Page(nextPageUrl));
}
@Nonnull
@ -68,6 +61,6 @@ public class SoundcloudChartsExtractor extends KioskExtractor<StreamInfoItem> {
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl, true);
return new InfoItemsPage<>(collector, nextPageUrl);
return new InfoItemsPage<>(collector, new Page(nextPageUrl));
}
}

View File

@ -4,7 +4,9 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.comments.CommentsInfoItem;
@ -15,53 +17,57 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import javax.annotation.Nonnull;
import java.io.IOException;
import javax.annotation.Nonnull;
public class SoundcloudCommentsExtractor extends CommentsExtractor {
private JsonObject json;
public SoundcloudCommentsExtractor(StreamingService service, ListLinkHandler uiHandler) {
public SoundcloudCommentsExtractor(final StreamingService service, final ListLinkHandler uiHandler) {
super(service, uiHandler);
}
@Nonnull
@Override
public InfoItemsPage<CommentsInfoItem> getInitialPage() throws IOException, ExtractionException {
final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
public InfoItemsPage<CommentsInfoItem> getInitialPage() throws ExtractionException, IOException {
final Downloader downloader = NewPipe.getDownloader();
final Response response = downloader.get(getUrl());
collectStreamsFrom(collector, json.getArray("collection"));
return new InfoItemsPage<>(collector, json.getString("next_href"));
}
@Override
public InfoItemsPage<CommentsInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
Downloader dl = NewPipe.getDownloader();
Response rp = dl.get(pageUrl);
try {
json = JsonParser.object().from(rp.responseBody());
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json", e);
}
final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json.getArray("collection"));
return new InfoItemsPage<>(collector, json.getString("next_href"));
}
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
Response response = downloader.get(getUrl());
final JsonObject json;
try {
json = JsonParser.object().from(response.responseBody());
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json", e);
}
final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json.getArray("collection"));
return new InfoItemsPage<>(collector, new Page(json.getString("next_href")));
}
@Override
public InfoItemsPage<CommentsInfoItem> getPage(final Page page) throws ExtractionException, IOException {
final Downloader downloader = NewPipe.getDownloader();
final Response response = downloader.get(page.getUrl());
final JsonObject json;
try {
json = JsonParser.object().from(response.responseBody());
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json", e);
}
final CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json.getArray("collection"));
return new InfoItemsPage<>(collector, new Page(json.getString("next_href")));
}
@Override
public void onFetchPage(@Nonnull final Downloader downloader) { }
private void collectStreamsFrom(final CommentsInfoItemsCollector collector, final JsonArray entries) throws ParsingException {
final String url = getUrl();
for (Object comment : entries) {

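Both methods now follow the same shape: download, parse, collect the "collection" array, and wrap "next_href" in a Page. A sketch of the last-page behaviour, with a hypothetical payload and checked exceptions omitted:

// Hypothetical last-page payload: SoundCloud omits/nulls "next_href".
final JsonObject json = JsonParser.object()
        .from("{\"collection\":[],\"next_href\":null}");
final Page nextPage = new Page(json.getString("next_href"));
// nextPage wraps a null URL and no ids, so the returned InfoItemsPage
// reports hasNextPage() == false and iteration stops cleanly.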
View File

@ -6,6 +6,7 @@ import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -15,17 +16,19 @@ import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.Utils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
@SuppressWarnings("WeakerAccess")
public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
private static final int streamsPerRequestedPage = 15;
private static final int STREAMS_PER_REQUESTED_PAGE = 15;
private String playlistId;
private JsonObject playlist;
@ -115,79 +118,65 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
@Nonnull
@Override
public String getSubChannelName() throws ParsingException {
public String getSubChannelName() {
return "";
}
@Nonnull
@Override
public String getSubChannelUrl() throws ParsingException {
public String getSubChannelUrl() {
return "";
}
@Nonnull
@Override
public String getSubChannelAvatarUrl() throws ParsingException {
public String getSubChannelAvatarUrl() {
return "";
}
@Nonnull
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
StreamInfoItemsCollector streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId());
StringBuilder nextPageUrlBuilder = new StringBuilder("https://api-v2.soundcloud.com/tracks?client_id=");
nextPageUrlBuilder.append(SoundcloudParsingHelper.clientId());
nextPageUrlBuilder.append("&ids=");
public InfoItemsPage<StreamInfoItem> getInitialPage() {
final StreamInfoItemsCollector streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId());
final List<String> ids = new ArrayList<>();
JsonArray tracks = playlist.getArray("tracks");
final JsonArray tracks = playlist.getArray("tracks");
for (Object o : tracks) {
if (o instanceof JsonObject) {
JsonObject track = (JsonObject) o;
final JsonObject track = (JsonObject) o;
if (track.has("title")) { // i.e. if full info is available
streamInfoItemsCollector.commit(new SoundcloudStreamInfoItemExtractor(track));
} else {
// %09d would be enough, but a leading zero does no harm, so use %010d to be safe
nextPageUrlBuilder.append(String.format("%010d,", track.getInt("id")));
ids.add(String.format("%010d", track.getInt("id")));
}
}
}
nextPageUrlBuilder.setLength(nextPageUrlBuilder.length() - 1); // remove trailing ,
String nextPageUrl = nextPageUrlBuilder.toString();
if (nextPageUrl.endsWith("&ids")) {
// there are no other videos
nextPageUrl = "";
}
return new InfoItemsPage<>(streamInfoItemsCollector, nextPageUrl);
return new InfoItemsPage<>(streamInfoItemsCollector, new Page(ids));
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
// see computeInitialTracksAndNextPageUrl
final int lengthFirstPartOfUrl = ("https://api-v2.soundcloud.com/tracks?client_id="
+ SoundcloudParsingHelper.clientId()
+ "&ids=").length();
final int lengthOfEveryStream = 11;
String currentPageUrl, nextUrl;
int lengthMaxStreams = lengthFirstPartOfUrl + lengthOfEveryStream * streamsPerRequestedPage;
if (pageUrl.length() <= lengthMaxStreams) {
currentPageUrl = pageUrl; // fetch every remaining video, there are less than the max
nextUrl = ""; // afterwards the list is complete
public InfoItemsPage<StreamInfoItem> getPage(Page page) throws IOException, ExtractionException {
final List<String> currentIds;
final List<String> nextIds;
if (page.getIds().size() <= STREAMS_PER_REQUESTED_PAGE) {
// Fetch every remaining stream, there are less than the max
currentIds = page.getIds();
nextIds = null;
} else {
currentPageUrl = pageUrl.substring(0, lengthMaxStreams);
nextUrl = pageUrl.substring(0, lengthFirstPartOfUrl) + pageUrl.substring(lengthMaxStreams);
currentIds = page.getIds().subList(0, STREAMS_PER_REQUESTED_PAGE);
nextIds = page.getIds().subList(STREAMS_PER_REQUESTED_PAGE, page.getIds().size());
}
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String response = NewPipe.getDownloader().get(currentPageUrl, getExtractorLocalization()).responseBody();
final String currentPageUrl = "https://api-v2.soundcloud.com/tracks?client_id="
+ SoundcloudParsingHelper.clientId()
+ "&ids=" + Utils.join(",", currentIds);
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final String response = NewPipe.getDownloader().get(currentPageUrl, getExtractorLocalization()).responseBody();
try {
JsonArray tracks = JsonParser.array().from(response);
final JsonArray tracks = JsonParser.array().from(response);
for (Object track : tracks) {
if (track instanceof JsonObject) {
collector.commit(new SoundcloudStreamInfoItemExtractor((JsonObject) track));
@ -197,6 +186,6 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
throw new ParsingException("Could not parse json response", e);
}
return new InfoItemsPage<>(collector, nextUrl);
return new InfoItemsPage<>(collector, new Page(nextIds));
}
}

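The id-based Page is what makes this rewrite possible: instead of encoding every remaining track id into an oversized URL and slicing it by character counts, the extractor now slices the id list itself, STREAMS_PER_REQUESTED_PAGE at a time. A sketch of the batching, with invented ids:

final List<String> ids = new ArrayList<>();
for (int i = 1; i <= 20; i++) {
    ids.add(String.format("%010d", i)); // zero-padded like the tracks above
}

// getInitialPage() hands these over as new Page(ids); the first getPage()
// call then requests ids 0..14 in one API call and forwards the remaining
// five as new Page(nextIds). The page after that carries a null id list,
// so hasNextPage() ends the iteration.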
View File

@ -7,6 +7,7 @@ import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemExtractor;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -46,20 +47,20 @@ public class SoundcloudSearchExtractor extends SearchExtractor {
@Nonnull
@Override
public InfoItemsPage<InfoItem> getInitialPage() throws IOException, ExtractionException {
return new InfoItemsPage<>(collectItems(searchCollection), getNextPageUrlFromCurrentUrl(getUrl()));
return new InfoItemsPage<>(collectItems(searchCollection), getNextPageFromCurrentUrl(getUrl()));
}
@Override
public InfoItemsPage<InfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
public InfoItemsPage<InfoItem> getPage(Page page) throws IOException, ExtractionException {
final Downloader dl = getDownloader();
try {
final String response = dl.get(pageUrl, getExtractorLocalization()).responseBody();
final String response = dl.get(page.getUrl(), getExtractorLocalization()).responseBody();
searchCollection = JsonParser.object().from(response).getArray("collection");
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json response", e);
}
return new InfoItemsPage<>(collectItems(searchCollection), getNextPageUrlFromCurrentUrl(pageUrl));
return new InfoItemsPage<>(collectItems(searchCollection), getNextPageFromCurrentUrl(page.getUrl()));
}
@Override
@ -102,7 +103,7 @@ public class SoundcloudSearchExtractor extends SearchExtractor {
return collector;
}
private String getNextPageUrlFromCurrentUrl(String currentUrl)
private Page getNextPageFromCurrentUrl(String currentUrl)
throws MalformedURLException, UnsupportedEncodingException {
final int pageOffset = Integer.parseInt(
Parser.compatParseMap(
@ -110,8 +111,7 @@ public class SoundcloudSearchExtractor extends SearchExtractor {
.getQuery())
.get("offset"));
return currentUrl.replace("&offset=" +
Integer.toString(pageOffset),
"&offset=" + Integer.toString(pageOffset + ITEMS_PER_PAGE));
return new Page(currentUrl.replace("&offset=" + pageOffset,
"&offset=" + (pageOffset + ITEMS_PER_PAGE)));
}
}

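A worked example of the offset arithmetic, with an invented query URL and assuming ITEMS_PER_PAGE is 10:

final String currentUrl = "https://api-v2.soundcloud.com/search?q=test&limit=10&offset=20";
final int pageOffset = 20; // what Parser.compatParseMap would extract
final Page next = new Page(currentUrl.replace("&offset=" + pageOffset,
        "&offset=" + (pageOffset + 10))); // ITEMS_PER_PAGE assumed to be 10
// next.getUrl() -> "https://api-v2.soundcloud.com/search?q=test&limit=10&offset=30"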
View File

@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -224,7 +225,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
public InfoItemsPage<StreamInfoItem> getInitialPage() throws ExtractionException {
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = null;
Page nextPage = null;
if (getVideoTab() != null) {
final JsonObject gridRenderer = getVideoTab().getObject("content").getObject("sectionListRenderer")
@ -233,44 +234,39 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
collectStreamsFrom(collector, gridRenderer.getArray("items"));
nextPageUrl = getNextPageUrlFrom(gridRenderer.getArray("continuations"));
nextPage = getNextPageFrom(gridRenderer.getArray("continuations"));
}
return new InfoItemsPage<>(collector, nextPageUrl);
return new InfoItemsPage<>(collector, nextPage);
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
public InfoItemsPage<StreamInfoItem> getPage(Page page) throws IOException, ExtractionException {
// Unfortunately, we have to fetch the page even if we are only getting next streams,
// as they don't deliver enough information on their own (the channel name, for example).
fetchPage();
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization());
final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization());
JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response")
.getObject("continuationContents").getObject("gridContinuation");
collectStreamsFrom(collector, sectionListContinuation.getArray("items"));
return new InfoItemsPage<>(collector, getNextPageUrlFrom(sectionListContinuation.getArray("continuations")));
return new InfoItemsPage<>(collector, getNextPageFrom(sectionListContinuation.getArray("continuations")));
}
private String getNextPageUrlFrom(JsonArray continuations) {
private Page getNextPageFrom(final JsonArray continuations) {
if (isNullOrEmpty(continuations)) {
return "";
return null;
}
JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
String continuation = nextContinuationData.getString("continuation");
String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return "https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams;
final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
final String continuation = nextContinuationData.getString("continuation");
final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return new Page("https://www.youtube.com/browse_ajax?ctoken=" + continuation
+ "&continuation=" + continuation + "&itct=" + clickTrackingParams);
}
private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonArray videos) throws ParsingException {
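Taken together with the InfoItemsPage changes, the empty-string sentinel for "no more pages" is gone: getNextPageFrom now returns null, and hasNextPage() presumably keys off that. A minimal caller sketch under that assumption (consume(...) is a hypothetical sink, not part of the library; imports from org.schabi.newpipe.extractor assumed):

// Sketch: draining a ListExtractor through the new Page-based API.
// Assumes InfoItemsPage.hasNextPage() is equivalent to getNextPage() != null.
static void drainAll(final ListExtractor<StreamInfoItem> extractor) throws Exception {
    ListExtractor.InfoItemsPage<StreamInfoItem> page = extractor.getInitialPage();
    consume(page.getItems());
    while (page.hasNextPage()) {
        page = extractor.getPage(page.getNextPage());
        consume(page.getItems());
    }
}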

View File

@ -3,6 +3,8 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.comments.CommentsInfoItem;
@ -17,7 +19,6 @@ import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Parser;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
@ -26,19 +27,18 @@ import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import static java.util.Collections.singletonList;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
import javax.annotation.Nonnull;
import static java.util.Collections.singletonList;
public class YoutubeCommentsExtractor extends CommentsExtractor {
// using the mobile site for comments because it loads faster and uses GET requests instead of POST
private static final String USER_AGENT = "Mozilla/5.0 (Android 8.1.0; Mobile; rv:62.0) Gecko/62.0 Firefox/62.0";
private static final Pattern YT_CLIENT_NAME_PATTERN = Pattern.compile("INNERTUBE_CONTEXT_CLIENT_NAME\\\":(.*?)[,}]");
private String ytClientVersion;
private String ytClientName;
private InfoItemsPage<CommentsInfoItem> initPage;
private String responseBody;
public YoutubeCommentsExtractor(StreamingService service, ListLinkHandler uiHandler) {
super(service, uiHandler);
@ -46,48 +46,45 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
@Override
public InfoItemsPage<CommentsInfoItem> getInitialPage() throws IOException, ExtractionException {
// the initial page does not load any comments but is required to get the comments token
super.fetchPage();
return initPage;
String commentsTokenInside = findValue(responseBody, "commentSectionRenderer", "}");
String commentsToken = findValue(commentsTokenInside, "continuation\":\"", "\"");
return getPage(getNextPage(commentsToken));
}
private String getNextPageUrl(JsonObject ajaxJson) throws ParsingException {
private Page getNextPage(JsonObject ajaxJson) throws ParsingException {
JsonArray arr;
try {
arr = JsonUtils.getArray(ajaxJson, "response.continuationContents.commentSectionContinuation.continuations");
} catch (Exception e) {
return "";
return null;
}
if (arr.isEmpty()) {
return "";
return null;
}
String continuation;
try {
continuation = JsonUtils.getString(arr.getObject(0), "nextContinuationData.continuation");
} catch (Exception e) {
return "";
return null;
}
return getNextPageUrl(continuation);
return getNextPage(continuation);
}
private String getNextPageUrl(String continuation) throws ParsingException {
private Page getNextPage(String continuation) throws ParsingException {
Map<String, String> params = new HashMap<>();
params.put("action_get_comments", "1");
params.put("pbj", "1");
params.put("ctoken", continuation);
try {
return "https://m.youtube.com/watch_comment?" + getDataString(params);
return new Page("https://m.youtube.com/watch_comment?" + getDataString(params));
} catch (UnsupportedEncodingException e) {
throw new ParsingException("Could not get next page url", e);
}
}
@Override
public InfoItemsPage<CommentsInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
String ajaxResponse = makeAjaxRequest(pageUrl);
public InfoItemsPage<CommentsInfoItem> getPage(Page page) throws IOException, ExtractionException {
String ajaxResponse = makeAjaxRequest(page.getUrl());
JsonObject ajaxJson;
try {
ajaxJson = JsonParser.array().from(ajaxResponse).getObject(1);
@ -96,11 +93,10 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
}
CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
collectCommentsFrom(collector, ajaxJson);
return new InfoItemsPage<>(collector, getNextPageUrl(ajaxJson));
return new InfoItemsPage<>(collector, getNextPage(ajaxJson));
}
private void collectCommentsFrom(CommentsInfoItemsCollector collector, JsonObject ajaxJson) throws ParsingException {
JsonArray contents;
try {
contents = JsonUtils.getArray(ajaxJson, "response.continuationContents.commentSectionContinuation.items");
@ -128,16 +124,13 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
final Map<String, List<String>> requestHeaders = new HashMap<>();
requestHeaders.put("User-Agent", singletonList(USER_AGENT));
final Response response = downloader.get(getUrl(), requestHeaders, getExtractorLocalization());
String responseBody = response.responseBody();
responseBody = response.responseBody();
ytClientVersion = findValue(responseBody, "INNERTUBE_CONTEXT_CLIENT_VERSION\":\"", "\"");
ytClientName = Parser.matchGroup1(YT_CLIENT_NAME_PATTERN, responseBody);
String commentsTokenInside = findValue(responseBody, "commentSectionRenderer", "}");
String commentsToken = findValue(commentsTokenInside, "continuation\":\"", "\"");
initPage = getPage(getNextPageUrl(commentsToken));
}
private String makeAjaxRequest(String siteUrl) throws IOException, ReCaptchaException {
Map<String, List<String>> requestHeaders = new HashMap<>();
requestHeaders.put("Accept", singletonList("*/*"));
requestHeaders.put("User-Agent", singletonList(USER_AGENT));
@ -166,22 +159,4 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
int endIndex = doc.indexOf(end, beginIndex);
return doc.substring(beginIndex, endIndex);
}
public static String getYoutubeText(@Nonnull JsonObject object) throws ParsingException {
try {
return JsonUtils.getString(object, "simpleText");
} catch (Exception e1) {
try {
JsonArray arr = JsonUtils.getArray(object, "runs");
String result = "";
for (int i = 0; i < arr.size(); i++) {
result = result + JsonUtils.getString(arr.getObject(i), "text");
}
return result;
} catch (Exception e2) {
return "";
}
}
}
}
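The getYoutubeText helper deleted above has moved to YoutubeParsingHelper as getTextFromObject (see the call-site updates in the next file). Presumably it keeps the same simpleText-then-runs fallback; a sketch reconstructed from the deleted body, which may differ from the committed version:

public static String getTextFromObject(@Nonnull final JsonObject object) throws ParsingException {
    try {
        // Fast path: plain text nodes.
        return JsonUtils.getString(object, "simpleText");
    } catch (Exception e1) {
        try {
            // Fallback: concatenate the "runs" fragments.
            final JsonArray arr = JsonUtils.getArray(object, "runs");
            final StringBuilder result = new StringBuilder();
            for (int i = 0; i < arr.size(); i++) {
                result.append(JsonUtils.getString(arr.getObject(i), "text"));
            }
            return result.toString();
        } catch (Exception e2) {
            return "";
        }
    }
}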

View File

@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.comments.CommentsInfoItemExtractor;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.localization.DateWrapper;
@ -11,7 +12,7 @@ import org.schabi.newpipe.extractor.utils.Utils;
import javax.annotation.Nullable;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getTextFromObject;
public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtractor {
@ -43,7 +44,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract
@Override
public String getName() throws ParsingException {
try {
return YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "authorText"));
return getTextFromObject(JsonUtils.getObject(json, "authorText"));
} catch (Exception e) {
return "";
}
@ -52,7 +53,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract
@Override
public String getTextualUploadDate() throws ParsingException {
try {
return YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "publishedTimeText"));
return getTextFromObject(JsonUtils.getObject(json, "publishedTimeText"));
} catch (Exception e) {
throw new ParsingException("Could not get publishedTimeText", e);
}
@ -81,7 +82,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract
@Override
public String getCommentText() throws ParsingException {
try {
String commentText = YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "contentText"));
String commentText = getTextFromObject(JsonUtils.getObject(json, "contentText"));
// YouTube adds U+FEFF in some comments, e.g. https://www.youtube.com/watch?v=Nj4F63E59io<feff>
return Utils.removeUTF8BOM(commentText);
} catch (Exception e) {
@ -111,7 +112,7 @@ public class YoutubeCommentsInfoItemExtractor implements CommentsInfoItemExtract
@Override
public String getUploaderName() throws ParsingException {
try {
return YoutubeCommentsExtractor.getYoutubeText(JsonUtils.getObject(json, "authorText"));
return getTextFromObject(JsonUtils.getObject(json, "authorText"));
} catch (Exception e) {
return "";
}

View File

@ -5,6 +5,7 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
@ -15,9 +16,10 @@ import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
import javax.annotation.Nonnull;
public class YoutubeFeedExtractor extends FeedExtractor {
public YoutubeFeedExtractor(StreamingService service, ListLinkHandler linkHandler) {
super(service, linkHandler);
@ -66,7 +68,7 @@ public class YoutubeFeedExtractor extends FeedExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) {
public InfoItemsPage<StreamInfoItem> getPage(Page page) {
return null;
}
}

View File

@ -7,6 +7,7 @@ import com.grack.nanojson.JsonParserException;
import com.grack.nanojson.JsonWriter;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -169,7 +170,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
final JsonArray contents = initialData.getObject("contents").getObject("sectionListRenderer").getArray("contents");
String nextPageUrl = null;
Page nextPage = null;
for (Object content : contents) {
if (((JsonObject) content).has("musicShelfRenderer")) {
@ -177,19 +178,15 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
collectMusicStreamsFrom(collector, musicShelfRenderer.getArray("contents"));
nextPageUrl = getNextPageUrlFrom(musicShelfRenderer.getArray("continuations"));
nextPage = getNextPageFrom(musicShelfRenderer.getArray("continuations"));
}
}
return new InfoItemsPage<>(collector, nextPageUrl);
return new InfoItemsPage<>(collector, nextPage);
}
@Override
public InfoItemsPage<InfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
final String[] youtubeMusicKeys = YoutubeParsingHelper.getYoutubeMusicKeys();
@ -229,7 +226,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
headers.put("Referer", Collections.singletonList("music.youtube.com"));
headers.put("Content-Type", Collections.singletonList("application/json"));
final String responseBody = getValidJsonResponseBody(getDownloader().post(pageUrl, headers, json));
final String responseBody = getValidJsonResponseBody(getDownloader().post(page.getUrl(), headers, json));
final JsonObject ajaxJson;
try {
@ -243,7 +240,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
collectMusicStreamsFrom(collector, musicShelfContinuation.getArray("contents"));
final JsonArray continuations = musicShelfContinuation.getArray("continuations");
return new InfoItemsPage<>(collector, getNextPageUrlFrom(continuations));
return new InfoItemsPage<>(collector, getNextPageFrom(continuations));
}
private void collectMusicStreamsFrom(final InfoItemsSearchCollector collector, final JsonArray videos) {
@ -488,16 +485,17 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
}
}
private String getNextPageUrlFrom(final JsonArray continuations) throws ParsingException, IOException, ReCaptchaException {
private Page getNextPageFrom(final JsonArray continuations) throws ParsingException, IOException, ReCaptchaException {
if (isNullOrEmpty(continuations)) {
return "";
return null;
}
final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
final String continuation = nextContinuationData.getString("continuation");
final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return "https://music.youtube.com/youtubei/v1/search?ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams + "&alt=json&key=" + YoutubeParsingHelper.getYoutubeMusicKeys()[0];
return new Page("https://music.youtube.com/youtubei/v1/search?ctoken=" + continuation
+ "&continuation=" + continuation + "&itct=" + clickTrackingParams + "&alt=json"
+ "&key=" + YoutubeParsingHelper.getYoutubeMusicKeys()[0]);
}
}

View File

@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -169,7 +170,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() {
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = null;
Page nextPage = null;
final JsonArray contents = initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer")
.getArray("tabs").getObject(0).getObject("tabRenderer").getObject("content")
@ -191,39 +192,35 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
final JsonObject videos = contents.getObject(0).getObject("playlistVideoListRenderer");
collectStreamsFrom(collector, videos.getArray("contents"));
nextPageUrl = getNextPageUrlFrom(videos.getArray("continuations"));
nextPage = getNextPageFrom(videos.getArray("continuations"));
}
return new InfoItemsPage<>(collector, nextPageUrl);
return new InfoItemsPage<>(collector, nextPage);
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization());
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization());
final JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response")
.getObject("continuationContents").getObject("playlistVideoListContinuation");
collectStreamsFrom(collector, sectionListContinuation.getArray("contents"));
return new InfoItemsPage<>(collector, getNextPageUrlFrom(sectionListContinuation.getArray("continuations")));
return new InfoItemsPage<>(collector, getNextPageFrom(sectionListContinuation.getArray("continuations")));
}
private String getNextPageUrlFrom(final JsonArray continuations) {
private Page getNextPageFrom(final JsonArray continuations) {
if (isNullOrEmpty(continuations)) {
return "";
return null;
}
JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
String continuation = nextContinuationData.getString("continuation");
String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return "https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams;
return new Page("https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams);
}
private void collectStreamsFrom(final StreamInfoItemsCollector collector, final JsonArray videos) {

View File

@ -3,6 +3,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -101,27 +102,23 @@ public class YoutubeSearchExtractor extends SearchExtractor {
final JsonArray sections = initialData.getObject("contents").getObject("twoColumnSearchResultsRenderer")
.getObject("primaryContents").getObject("sectionListRenderer").getArray("contents");
String nextPageUrl = null;
Page nextPage = null;
for (Object section : sections) {
final JsonObject itemSectionRenderer = ((JsonObject) section).getObject("itemSectionRenderer");
collectStreamsFrom(collector, itemSectionRenderer.getArray("contents"));
nextPageUrl = getNextPageUrlFrom(itemSectionRenderer.getArray("continuations"));
nextPage = getNextPageFrom(itemSectionRenderer.getArray("continuations"));
}
return new InfoItemsPage<>(collector, nextPageUrl);
return new InfoItemsPage<>(collector, nextPage);
}
@Override
public InfoItemsPage<InfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
if (isNullOrEmpty(pageUrl)) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization());
final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization());
final JsonObject itemSectionRenderer = ajaxJson.getObject(1).getObject("response")
.getObject("continuationContents").getObject("itemSectionContinuation");
@ -129,7 +126,7 @@ public class YoutubeSearchExtractor extends SearchExtractor {
collectStreamsFrom(collector, itemSectionRenderer.getArray("contents"));
final JsonArray continuations = itemSectionRenderer.getArray("continuations");
return new InfoItemsPage<>(collector, getNextPageUrlFrom(continuations));
return new InfoItemsPage<>(collector, getNextPageFrom(continuations));
}
private void collectStreamsFrom(final InfoItemsSearchCollector collector, final JsonArray videos) throws NothingFoundException, ParsingException {
@ -149,16 +146,16 @@ public class YoutubeSearchExtractor extends SearchExtractor {
}
}
private String getNextPageUrlFrom(final JsonArray continuations) throws ParsingException {
private Page getNextPageFrom(final JsonArray continuations) throws ParsingException {
if (isNullOrEmpty(continuations)) {
return "";
return null;
}
final JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
final String continuation = nextContinuationData.getString("continuation");
final String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return getUrl() + "&pbj=1&ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams;
return new Page(getUrl() + "&pbj=1&ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams);
}
}

View File

@ -23,6 +23,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -61,7 +62,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) {
public InfoItemsPage<StreamInfoItem> getPage(Page page) {
return null;
}

View File

@ -7,6 +7,7 @@ import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@ -222,4 +223,16 @@ public class Utils {
return true;
}
public static String join(final CharSequence delimiter, final Iterable<? extends CharSequence> elements) {
final StringBuilder stringBuilder = new StringBuilder();
final Iterator<? extends CharSequence> iterator = elements.iterator();
while (iterator.hasNext()) {
stringBuilder.append(iterator.next());
if (iterator.hasNext()) {
stringBuilder.append(delimiter);
}
}
return stringBuilder.toString();
}
}
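Utils.join is a plain delimiter join (String.join is unavailable on the older Android API levels the extractor targets, which is presumably why it exists here). A usage sketch with hypothetical values — likely how the comma-separated SoundCloud track-id pages get built:

import java.util.Arrays;
import java.util.List;

class JoinDemo {
    public static void main(String[] args) {
        // Hypothetical track ids; Utils is org.schabi.newpipe.extractor.utils.Utils.
        final List<String> trackIds = Arrays.asList("12345", "67890", "13579");
        System.out.println(Utils.join(",", trackIds));  // -> 12345,67890,13579
    }
}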

View File

@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
@ -86,8 +87,6 @@ public final class DefaultTests {
public static <T extends InfoItem> void assertNoMoreItems(ListExtractor<T> extractor) throws Exception {
final ListExtractor.InfoItemsPage<T> initialPage = extractor.getInitialPage();
assertFalse("More items available when it shouldn't", initialPage.hasNextPage());
final String nextPageUrl = initialPage.getNextPageUrl();
assertTrue("Next page is not empty or null", isNullOrEmpty(nextPageUrl));
}
public static void assertNoDuplicatedItems(StreamingService expectedService,
@ -121,7 +120,7 @@ public final class DefaultTests {
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestMoreItems(ListExtractor<T> extractor) throws Exception {
final ListExtractor.InfoItemsPage<T> initialPage = extractor.getInitialPage();
assertTrue("Doesn't have more items", initialPage.hasNextPage());
ListExtractor.InfoItemsPage<T> nextPage = extractor.getPage(initialPage.getNextPageUrl());
ListExtractor.InfoItemsPage<T> nextPage = extractor.getPage(initialPage.getNextPage());
final List<T> items = nextPage.getItems();
assertFalse("Next page is empty", items.isEmpty());
assertEmptyErrors("Next page have errors", nextPage.getErrors());
@ -131,9 +130,9 @@ public final class DefaultTests {
}
public static void defaultTestGetPageInNewExtractor(ListExtractor<? extends InfoItem> extractor, ListExtractor<? extends InfoItem> newExtractor) throws Exception {
final String nextPageUrl = extractor.getInitialPage().getNextPageUrl();
final Page nextPage = extractor.getInitialPage().getNextPage();
final ListExtractor.InfoItemsPage<? extends InfoItem> page = newExtractor.getPage(nextPageUrl);
final ListExtractor.InfoItemsPage<? extends InfoItem> page = newExtractor.getPage(nextPage);
defaultTestListOfItems(extractor.getService(), page.getItems(), page.getErrors());
}
}

View File

@ -5,6 +5,7 @@ import org.junit.Test;
import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.comments.CommentsInfo;
import org.schabi.newpipe.extractor.comments.CommentsInfoItem;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -14,7 +15,9 @@ import org.schabi.newpipe.extractor.utils.Utils;
import java.io.IOException;
import java.util.List;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.schabi.newpipe.extractor.ServiceList.PeerTube;
public class PeertubeCommentsExtractorTest {
@ -31,11 +34,10 @@ public class PeertubeCommentsExtractorTest {
@Test
public void testGetComments() throws IOException, ExtractionException {
InfoItemsPage<CommentsInfoItem> comments = extractor.getInitialPage();
assertTrue(comments.getErrors().isEmpty());
boolean result = findInComments(comments, "@root A great documentary on a great guy.");
while (comments.hasNextPage() && !result) {
comments = extractor.getPage(comments.getNextPageUrl());
comments = extractor.getPage(comments.getNextPage());
result = findInComments(comments, "@root A great documentary on a great guy.");
}
@ -44,24 +46,26 @@ public class PeertubeCommentsExtractorTest {
@Test
public void testGetCommentsFromCommentsInfo() throws IOException, ExtractionException {
final CommentsInfo commentsInfo = CommentsInfo.getInfo("https://framatube.org/videos/watch/a8ea95b8-0396-49a6-8f30-e25e25fb2828");
assertTrue(commentsInfo.getErrors().isEmpty());
CommentsInfo commentsInfo = CommentsInfo.getInfo("https://framatube.org/videos/watch/a8ea95b8-0396-49a6-8f30-e25e25fb2828");
assertEquals("Comments", commentsInfo.getName());
boolean result = findInComments(commentsInfo.getRelatedItems(), "Loved it!!!");
String nextPage = commentsInfo.getNextPageUrl();
while (!Utils.isBlank(nextPage) && !result) {
final InfoItemsPage<CommentsInfoItem> moreItems = CommentsInfo.getMoreItems(PeerTube, commentsInfo, nextPage);
Page nextPage = commentsInfo.getNextPage();
InfoItemsPage<CommentsInfoItem> moreItems = new InfoItemsPage<>(null, nextPage, null);
while (moreItems.hasNextPage() && !result) {
moreItems = CommentsInfo.getMoreItems(PeerTube, commentsInfo, nextPage);
result = findInComments(moreItems.getItems(), "Loved it!!!");
nextPage = moreItems.getNextPageUrl();
nextPage = moreItems.getNextPage();
}
assertTrue(result);
}
@Test
public void testGetCommentsAllData() throws IOException, ExtractionException {
final InfoItemsPage<CommentsInfoItem> comments = extractor.getInitialPage();
for (final CommentsInfoItem c : comments.getItems()) {
InfoItemsPage<CommentsInfoItem> comments = extractor.getInitialPage();
for (CommentsInfoItem c : comments.getItems()) {
assertFalse(Utils.isBlank(c.getUploaderUrl()));
assertFalse(Utils.isBlank(c.getUploaderName()));
assertFalse(Utils.isBlank(c.getUploaderAvatarUrl()));
@ -71,16 +75,16 @@ public class PeertubeCommentsExtractorTest {
assertFalse(Utils.isBlank(c.getTextualUploadDate()));
assertFalse(Utils.isBlank(c.getThumbnailUrl()));
assertFalse(Utils.isBlank(c.getUrl()));
assertEquals(-1, c.getLikeCount());
assertFalse(c.getLikeCount() != -1);
}
}
private boolean findInComments(final InfoItemsPage<CommentsInfoItem> comments, final String comment) {
private boolean findInComments(InfoItemsPage<CommentsInfoItem> comments, String comment) {
return findInComments(comments.getItems(), comment);
}
private boolean findInComments(final List<CommentsInfoItem> comments, final String comment) {
for (final CommentsInfoItem c : comments) {
private boolean findInComments(List<CommentsInfoItem> comments, String comment) {
for (CommentsInfoItem c : comments) {
if (c.getCommentText().contains(comment)) {
return true;
}
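Note the loop-priming idiom above: a placeholder page is built as new InfoItemsPage<>(null, nextPage, null) solely so that hasNextPage() can gate the first iteration. That only works if hasNextPage() is a null check on the stored Page, presumably:

// Presumed InfoItemsPage method backing the priming idiom (sketch):
public boolean hasNextPage() {
    return nextPage != null;
}

A do/while around CommentsInfo.getMoreItems would avoid the null-list placeholder, but the placeholder keeps the loop shape identical to the extractor-based tests.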

View File

@ -51,7 +51,7 @@ public class PeertubeSearchExtractorTest {
extractor.fetchPage();
final InfoItemsPage<InfoItem> page1 = extractor.getInitialPage();
final InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPageUrl());
final InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPage());
assertNoDuplicatedItems(PeerTube, page1, page2);
}

View File

@ -268,7 +268,7 @@ public class SoundcloudPlaylistExtractorTest {
ListExtractor.InfoItemsPage<StreamInfoItem> currentPage = defaultTestMoreItems(extractor);
// Test for 2 more levels
for (int i = 0; i < 2; i++) {
currentPage = extractor.getPage(currentPage.getNextPageUrl());
currentPage = extractor.getPage(currentPage.getNextPage());
defaultTestListOfItems(SoundCloud, currentPage.getItems(), currentPage.getErrors());
}
}

View File

@ -119,7 +119,7 @@ public class SoundcloudSearchExtractorTest {
extractor.fetchPage();
final InfoItemsPage<InfoItem> page1 = extractor.getInitialPage();
final InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPageUrl());
final InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPage());
assertNoDuplicatedItems(SoundCloud, page1, page2);
}

View File

@ -5,6 +5,7 @@ import org.junit.Test;
import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.comments.CommentsInfo;
import org.schabi.newpipe.extractor.comments.CommentsInfoItem;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -22,7 +23,6 @@ import static org.junit.Assert.assertTrue;
import static org.schabi.newpipe.extractor.ServiceList.YouTube;
public class YoutubeCommentsExtractorTest {
private static final String urlYT = "https://www.youtube.com/watch?v=D00Au7k3i6o";
private static final String urlInvidious = "https://invidio.us/watch?v=D00Au7k3i6o";
private static YoutubeCommentsExtractor extractorYT;
@ -33,6 +33,7 @@ public class YoutubeCommentsExtractorTest {
NewPipe.init(DownloaderTestImpl.getInstance());
extractorYT = (YoutubeCommentsExtractor) YouTube
.getCommentsExtractor(urlYT);
extractorYT.fetchPage();
extractorInvidious = (YoutubeCommentsExtractor) YouTube
.getCommentsExtractor(urlInvidious);
}
@ -44,12 +45,11 @@ public class YoutubeCommentsExtractorTest {
}
private boolean getCommentsHelper(YoutubeCommentsExtractor extractor) throws IOException, ExtractionException {
boolean result;
InfoItemsPage<CommentsInfoItem> comments = extractor.getInitialPage();
result = findInComments(comments, "s1ck m3m3");
boolean result = findInComments(comments, "s1ck m3m3");
while (comments.hasNextPage() && !result) {
comments = extractor.getPage(comments.getNextPageUrl());
comments = extractor.getPage(comments.getNextPage());
result = findInComments(comments, "s1ck m3m3");
}
@ -63,16 +63,18 @@ public class YoutubeCommentsExtractorTest {
}
private boolean getCommentsFromCommentsInfoHelper(String url) throws IOException, ExtractionException {
boolean result = false;
CommentsInfo commentsInfo = CommentsInfo.getInfo(url);
result = findInComments(commentsInfo.getRelatedItems(), "s1ck m3m3");
/* String nextPage = commentsInfo.getNextPageUrl();
while (!Utils.isBlank(nextPage) && !result) {
InfoItemsPage<CommentsInfoItem> moreItems = CommentsInfo.getMoreItems(YouTube, commentsInfo, nextPage);
assertEquals("Comments", commentsInfo.getName());
boolean result = findInComments(commentsInfo.getRelatedItems(), "s1ck m3m3");
Page nextPage = commentsInfo.getNextPage();
InfoItemsPage<CommentsInfoItem> moreItems = new InfoItemsPage<>(null, nextPage, null);
while (moreItems.hasNextPage() && !result) {
moreItems = CommentsInfo.getMoreItems(YouTube, commentsInfo, nextPage);
result = findInComments(moreItems.getItems(), "s1ck m3m3");
nextPage = moreItems.getNextPageUrl();
}*/
nextPage = moreItems.getNextPage();
}
return result;
}

View File

@ -209,7 +209,7 @@ public class YoutubePlaylistExtractorTest {
// Test for 2 more levels
for (int i = 0; i < 2; i++) {
currentPage = extractor.getPage(currentPage.getNextPageUrl());
currentPage = extractor.getPage(currentPage.getNextPage());
defaultTestListOfItems(YouTube, currentPage.getItems(), currentPage.getErrors());
}
}

View File

@ -19,8 +19,9 @@ import static org.junit.Assert.assertTrue;
import static org.schabi.newpipe.extractor.ExtractorAsserts.assertEmptyErrors;
import static org.schabi.newpipe.extractor.ServiceList.YouTube;
import static org.schabi.newpipe.extractor.services.DefaultTests.assertNoDuplicatedItems;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.*;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.CHANNELS;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.PLAYLISTS;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory.VIDEOS;
public class YoutubeSearchExtractorTest {
public static class All extends DefaultSearchExtractorTest {
@ -189,13 +190,11 @@ public class YoutubeSearchExtractorTest {
final ListExtractor.InfoItemsPage<InfoItem> initialPage = extractor().getInitialPage();
// YouTube actually gives us an empty next page, but after that, no more pages.
assertTrue(initialPage.hasNextPage());
final ListExtractor.InfoItemsPage<InfoItem> nextEmptyPage = extractor.getPage(initialPage.getNextPageUrl());
final ListExtractor.InfoItemsPage<InfoItem> nextEmptyPage = extractor.getPage(initialPage.getNextPage());
assertEquals(0, nextEmptyPage.getItems().size());
assertEmptyErrors("Empty page has errors", nextEmptyPage.getErrors());
assertFalse("More items available when it shouldn't", nextEmptyPage.hasNextPage());
final String nextPageUrl = nextEmptyPage.getNextPageUrl();
assertTrue("Next page is not empty or null", isNullOrEmpty(nextPageUrl));
}
}
@ -207,7 +206,7 @@ public class YoutubeSearchExtractorTest {
extractor.fetchPage();
final ListExtractor.InfoItemsPage<InfoItem> page1 = extractor.getInitialPage();
final ListExtractor.InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPageUrl());
final ListExtractor.InfoItemsPage<InfoItem> page2 = extractor.getPage(page1.getNextPage());
assertNoDuplicatedItems(YouTube, page1, page2);
}