Make some methods return the specific InfoItem type

- Some methods were returning a broader range of InfoItem types than they should. For example, a ChannelInfo should return a List containing only StreamInfoItem, instead of the more general InfoItem.
- Renamed and changed return type of ListExtractor.getInfoItems to getInitialPage returning an InfoItemsPage, to be consistent with getPage(url)
This commit is contained in:
Mauricio Colli 2018-03-11 17:54:41 -03:00
parent 5dd2daad37
commit 37f2e5cfda
No known key found for this signature in database
GPG Key ID: F200BFD6F29DDD85
19 changed files with 159 additions and 151 deletions

View File

@ -4,21 +4,45 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import javax.annotation.Nonnull; import javax.annotation.Nonnull;
import java.io.IOException; import java.io.IOException;
import java.util.Collections;
import java.util.List; import java.util.List;
/** /**
* Base class to extractors that have a list (e.g. playlists, users). * Base class to extractors that have a list (e.g. playlists, users).
*/ */
public abstract class ListExtractor extends Extractor { public abstract class ListExtractor<R extends InfoItem> extends Extractor {
public ListExtractor(StreamingService service, String url) { public ListExtractor(StreamingService service, String url) {
super(service, url); super(service, url);
} }
/**
* A {@link InfoItemsPage InfoItemsPage} corresponding to the initial page where the items are from the initial request and
* the nextPageUrl relative to it.
*
* @return a {@link InfoItemsPage} corresponding to the initial page
*/
@Nonnull @Nonnull
public abstract InfoItemsCollector<? extends InfoItem, ?> getInfoItems() throws IOException, ExtractionException; public abstract InfoItemsPage<R> getInitialPage() throws IOException, ExtractionException;
/**
* Returns an url that can be used to get the next page relative to the initial one.<br/>
* <p>Usually, these links will only work in the implementation itself.</p>
*
* @return an url pointing to the next page relative to the initial page
* @see #getPage(String)
*/
public abstract String getNextPageUrl() throws IOException, ExtractionException; public abstract String getNextPageUrl() throws IOException, ExtractionException;
public abstract InfoItemPage<? extends InfoItem> getPage(final String nextPageUrl) throws IOException, ExtractionException;
/**
* Get a list of items corresponding to the specific requested page.
*
* @param nextPageUrl any next page url got from the exclusive implementation of the list extractor
* @return a {@link InfoItemsPage} corresponding to the requested page
* @see #getNextPageUrl()
* @see InfoItemsPage#getNextPageUrl()
*/
public abstract InfoItemsPage<R> getPage(final String nextPageUrl) throws IOException, ExtractionException;
public boolean hasNextPage() throws IOException, ExtractionException { public boolean hasNextPage() throws IOException, ExtractionException {
final String nextPageUrl = getNextPageUrl(); final String nextPageUrl = getNextPageUrl();
@ -29,14 +53,34 @@ public abstract class ListExtractor extends Extractor {
// Inner // Inner
//////////////////////////////////////////////////////////////////////////*/ //////////////////////////////////////////////////////////////////////////*/
public static class InfoItemPage<T extends InfoItem> { /**
* A class that is used to wrap a list of gathered items and eventual errors, it
* also contains a field that points to the next available page ({@link #nextPageUrl}).
*/
public static class InfoItemsPage<T extends InfoItem> {
private static final InfoItemsPage<InfoItem> EMPTY =
new InfoItemsPage<>(Collections.<InfoItem>emptyList(), "", Collections.<Throwable>emptyList());
/** /**
* The current list of items to this result * A convenient method that returns a representation of an empty page.
*
* @return a type-safe page with the list of items and errors empty and the nextPageUrl set to an empty string.
*/
public static <T extends InfoItem> InfoItemsPage<T> emptyPage() {
//noinspection unchecked
return (InfoItemsPage<T>) EMPTY;
}
/**
* The current list of items of this page
*/ */
private final List<T> itemsList; private final List<T> itemsList;
/** /**
* Next url to fetch more items * Url pointing to the next page relative to this one
*
* @see ListExtractor#getPage(String)
*/ */
private final String nextPageUrl; private final String nextPageUrl;
@ -45,11 +89,11 @@ public abstract class ListExtractor extends Extractor {
*/ */
private final List<Throwable> errors; private final List<Throwable> errors;
public InfoItemPage(InfoItemsCollector<T, ?> collector, String nextPageUrl) { public InfoItemsPage(InfoItemsCollector<T, ?> collector, String nextPageUrl) {
this(collector.getItems(), nextPageUrl, collector.getErrors()); this(collector.getItems(), nextPageUrl, collector.getErrors());
} }
public InfoItemPage(List<T> itemsList, String nextPageUrl, List<Throwable> errors) { public InfoItemsPage(List<T> itemsList, String nextPageUrl, List<Throwable> errors) {
this.itemsList = itemsList; this.itemsList = itemsList;
this.nextPageUrl = nextPageUrl; this.nextPageUrl = nextPageUrl;
this.errors = errors; this.errors = errors;
@ -59,7 +103,7 @@ public abstract class ListExtractor extends Extractor {
return nextPageUrl != null && !nextPageUrl.isEmpty(); return nextPageUrl != null && !nextPageUrl.isEmpty();
} }
public List<T> getItemsList() { public List<T> getItems() {
return itemsList; return itemsList;
} }

View File

@ -1,16 +1,12 @@
package org.schabi.newpipe.extractor.channel; package org.schabi.newpipe.extractor.channel;
import edu.umd.cs.findbugs.annotations.NonNull;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler; import org.schabi.newpipe.extractor.UrlIdHandler;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull; import javax.annotation.Nonnull;
import java.io.IOException;
/* /*
* Created by Christian Schabesberger on 25.07.16. * Created by Christian Schabesberger on 25.07.16.
@ -32,7 +28,7 @@ import java.io.IOException;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>. * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/ */
public abstract class ChannelExtractor extends ListExtractor { public abstract class ChannelExtractor extends ListExtractor<StreamInfoItem> {
public ChannelExtractor(StreamingService service, String url) { public ChannelExtractor(StreamingService service, String url) {
super(service, url); super(service, url);
@ -44,12 +40,6 @@ public abstract class ChannelExtractor extends ListExtractor {
return getService().getChannelUrlIdHandler(); return getService().getChannelUrlIdHandler();
} }
@NonNull
@Override
public abstract StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException;
@Override
public abstract InfoItemPage<StreamInfoItem> getPage(String nextPageUrl) throws IOException, ExtractionException;
public abstract String getAvatarUrl() throws ParsingException; public abstract String getAvatarUrl() throws ParsingException;
public abstract String getBannerUrl() throws ParsingException; public abstract String getBannerUrl() throws ParsingException;
public abstract String getFeedUrl() throws ParsingException; public abstract String getFeedUrl() throws ParsingException;

View File

@ -1,6 +1,6 @@
package org.schabi.newpipe.extractor.channel; package org.schabi.newpipe.extractor.channel;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemPage; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo; import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.StreamingService;
@ -30,18 +30,12 @@ import java.io.IOException;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>. * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/ */
public class ChannelInfo extends ListInfo { public class ChannelInfo extends ListInfo<StreamInfoItem> {
public ChannelInfo(int serviceId, String url, String id, String name) { public ChannelInfo(int serviceId, String url, String id, String name) {
super(serviceId, id, url, name); super(serviceId, id, url, name);
} }
public static InfoItemPage<StreamInfoItem> getMoreItems(StreamingService service, String url, String pageUrl)
throws IOException, ExtractionException {
return service.getChannelExtractor(url).getPage(pageUrl);
}
public static ChannelInfo getInfo(String url) throws IOException, ExtractionException { public static ChannelInfo getInfo(String url) throws IOException, ExtractionException {
return getInfo(NewPipe.getServiceByUrl(url), url); return getInfo(NewPipe.getServiceByUrl(url), url);
} }
@ -52,6 +46,10 @@ public class ChannelInfo extends ListInfo {
return getInfo(extractor); return getInfo(extractor);
} }
public static InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService service, String url, String pageUrl) throws IOException, ExtractionException {
return service.getChannelExtractor(url).getPage(pageUrl);
}
public static ChannelInfo getInfo(ChannelExtractor extractor) throws IOException, ExtractionException { public static ChannelInfo getInfo(ChannelExtractor extractor) throws IOException, ExtractionException {
// important data // important data
@ -79,7 +77,9 @@ public class ChannelInfo extends ListInfo {
info.addError(e); info.addError(e);
} }
info.setRelatedItems(ExtractorHelper.getInfoItemsOrLogError(info, extractor)); final InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
try { try {
info.setSubscriberCount(extractor.getSubscriberCount()); info.setSubscriberCount(extractor.getSubscriberCount());
@ -92,7 +92,6 @@ public class ChannelInfo extends ListInfo {
info.addError(e); info.addError(e);
} }
info.setNextPageUrl(extractor.getNextPageUrl());
return info; return info;
} }

View File

@ -20,18 +20,15 @@ package org.schabi.newpipe.extractor.kiosk;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>. * along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/ */
import edu.umd.cs.findbugs.annotations.NonNull;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull; import javax.annotation.Nonnull;
import java.io.IOException;
public abstract class KioskExtractor extends ListExtractor { public abstract class KioskExtractor extends ListExtractor<StreamInfoItem> {
private String contentCountry = null; private String contentCountry = null;
private final String id; private final String id;
@ -43,12 +40,6 @@ public abstract class KioskExtractor extends ListExtractor {
this.id = kioskId; this.id = kioskId;
} }
@NonNull
@Override
public abstract StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException;
@Override
public abstract InfoItemPage<StreamInfoItem> getPage(String nextPageUrl) throws IOException, ExtractionException;
/** /**
* For certain Websites the content of a kiosk will be different depending * For certain Websites the content of a kiosk will be different depending
 * on the country you want to open the website in. Therefore you should * on the country you want to open the website in. Therefore you should

View File

@ -30,16 +30,16 @@ import org.schabi.newpipe.extractor.utils.ExtractorHelper;
import java.io.IOException; import java.io.IOException;
public class KioskInfo extends ListInfo { public class KioskInfo extends ListInfo<StreamInfoItem> {
private KioskInfo(int serviceId, String id, String url, String name) { private KioskInfo(int serviceId, String id, String url, String name) {
super(serviceId, id, url, name); super(serviceId, id, url, name);
} }
public static ListExtractor.InfoItemPage<StreamInfoItem> getMoreItems(StreamingService service, public static ListExtractor.InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService service,
String url, String url,
String pageUrl, String pageUrl,
String contentCountry) throws IOException, ExtractionException { String contentCountry) throws IOException, ExtractionException {
KioskList kl = service.getKioskList(); KioskList kl = service.getKioskList();
KioskExtractor extractor = kl.getExtractorByUrl(url, pageUrl); KioskExtractor extractor = kl.getExtractorByUrl(url, pageUrl);
extractor.setContentCountry(contentCountry); extractor.setContentCountry(contentCountry);
@ -75,7 +75,9 @@ public class KioskInfo extends ListInfo {
KioskInfo info = new KioskInfo(serviceId, id, name, url); KioskInfo info = new KioskInfo(serviceId, id, name, url);
info.setRelatedItems(ExtractorHelper.getInfoItemsOrLogError(info, extractor)); final ListExtractor.InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
return info; return info;
} }

View File

@ -1,18 +1,14 @@
package org.schabi.newpipe.extractor.playlist; package org.schabi.newpipe.extractor.playlist;
import edu.umd.cs.findbugs.annotations.NonNull;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler; import org.schabi.newpipe.extractor.UrlIdHandler;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException; import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull; import javax.annotation.Nonnull;
import java.io.IOException;
public abstract class PlaylistExtractor extends ListExtractor { public abstract class PlaylistExtractor extends ListExtractor<StreamInfoItem> {
public PlaylistExtractor(StreamingService service, String url) { public PlaylistExtractor(StreamingService service, String url) {
super(service, url); super(service, url);
@ -24,12 +20,6 @@ public abstract class PlaylistExtractor extends ListExtractor {
return getService().getPlaylistUrlIdHandler(); return getService().getPlaylistUrlIdHandler();
} }
@NonNull
@Override
public abstract StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException;
@Override
public abstract InfoItemPage<StreamInfoItem> getPage(String nextPageUrl) throws IOException, ExtractionException;
public abstract String getThumbnailUrl() throws ParsingException; public abstract String getThumbnailUrl() throws ParsingException;
public abstract String getBannerUrl() throws ParsingException; public abstract String getBannerUrl() throws ParsingException;

View File

@ -1,26 +1,21 @@
package org.schabi.newpipe.extractor.playlist; package org.schabi.newpipe.extractor.playlist;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemPage; import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo; import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.utils.ExtractorHelper;
import java.io.IOException; import java.io.IOException;
import static org.schabi.newpipe.extractor.utils.ExtractorHelper.getInfoItemsOrLogError; public class PlaylistInfo extends ListInfo<StreamInfoItem> {
public class PlaylistInfo extends ListInfo {
public PlaylistInfo(int serviceId, String id, String url, String name) { public PlaylistInfo(int serviceId, String id, String url, String name) {
super(serviceId, id, url, name); super(serviceId, id, url, name);
} }
public static InfoItemPage<StreamInfoItem> getMoreItems(StreamingService service, String url, String pageUrl) throws IOException, ExtractionException {
return service.getPlaylistExtractor(url).getPage(pageUrl);
}
public static PlaylistInfo getInfo(String url) throws IOException, ExtractionException { public static PlaylistInfo getInfo(String url) throws IOException, ExtractionException {
return getInfo(NewPipe.getServiceByUrl(url), url); return getInfo(NewPipe.getServiceByUrl(url), url);
} }
@ -31,6 +26,10 @@ public class PlaylistInfo extends ListInfo {
return getInfo(extractor); return getInfo(extractor);
} }
public static InfoItemsPage<StreamInfoItem> getMoreItems(StreamingService service, String url, String pageUrl) throws IOException, ExtractionException {
return service.getPlaylistExtractor(url).getPage(pageUrl);
}
/** /**
* Get PlaylistInfo from PlaylistExtractor * Get PlaylistInfo from PlaylistExtractor
* *
@ -75,8 +74,10 @@ public class PlaylistInfo extends ListInfo {
info.addError(e); info.addError(e);
} }
info.setRelatedItems(getInfoItemsOrLogError(info, extractor)); final InfoItemsPage<StreamInfoItem> itemsPage = ExtractorHelper.getItemsPageOrLogError(info, extractor);
info.setNextPageUrl(extractor.getNextPageUrl()); info.setRelatedItems(itemsPage.getItems());
info.setNextPageUrl(itemsPage.getNextPageUrl());
return info; return info;
} }

View File

@ -92,11 +92,11 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
@Nonnull @Nonnull
@Override @Override
public StreamInfoItemsCollector getInfoItems() throws ExtractionException { public InfoItemsPage<StreamInfoItem> getInitialPage() throws ExtractionException {
if(streamInfoItemsCollector == null) { if(streamInfoItemsCollector == null) {
computeNextPageAndGetStreams(); computeNextPageAndGetStreams();
} }
return streamInfoItemsCollector; return new InfoItemsPage<>(streamInfoItemsCollector, getNextPageUrl());
} }
@Override @Override
@ -123,7 +123,7 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
} }
@Override @Override
public InfoItemPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
if (pageUrl == null || pageUrl.isEmpty()) { if (pageUrl == null || pageUrl.isEmpty()) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
} }
@ -131,6 +131,6 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl); String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl);
return new InfoItemPage<>(collector, nextPageUrl); return new InfoItemsPage<>(collector, nextPageUrl);
} }
} }

View File

@ -42,7 +42,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
} }
@Override @Override
public InfoItemPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (pageUrl == null || pageUrl.isEmpty()) { if (pageUrl == null || pageUrl.isEmpty()) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
} }
@ -50,7 +50,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, pageUrl, true); String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, pageUrl, true);
return new InfoItemPage<>(collector, nextPageUrl); return new InfoItemsPage<>(collector, nextPageUrl);
} }
@ -86,10 +86,10 @@ public class SoundcloudChartsExtractor extends KioskExtractor {
@Nonnull @Nonnull
@Override @Override
public StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
if(collector == null) { if(collector == null) {
computNextPageAndStreams(); computNextPageAndStreams();
} }
return collector; return new InfoItemsPage<>(collector, getNextPageUrl());
} }
} }

View File

@ -70,7 +70,7 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
// If the thumbnail is null, traverse the items list and get a valid one, // If the thumbnail is null, traverse the items list and get a valid one,
// if it also fails, return null // if it also fails, return null
try { try {
final StreamInfoItemsCollector infoItems = getInfoItems(); final InfoItemsPage<StreamInfoItem> infoItems = getInitialPage();
if (infoItems.getItems().isEmpty()) return null; if (infoItems.getItems().isEmpty()) return null;
for (StreamInfoItem item : infoItems.getItems()) { for (StreamInfoItem item : infoItems.getItems()) {
@ -113,11 +113,11 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
@Nonnull @Nonnull
@Override @Override
public StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
if(streamInfoItemsCollector == null) { if (streamInfoItemsCollector == null) {
computeStreamsAndNextPageUrl(); computeStreamsAndNextPageUrl();
} }
return streamInfoItemsCollector; return new InfoItemsPage<>(streamInfoItemsCollector, getNextPageUrl());
} }
private void computeStreamsAndNextPageUrl() throws ExtractionException, IOException { private void computeStreamsAndNextPageUrl() throws ExtractionException, IOException {
@ -134,14 +134,14 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
@Override @Override
public String getNextPageUrl() throws IOException, ExtractionException { public String getNextPageUrl() throws IOException, ExtractionException {
if(nextPageUrl == null) { if (nextPageUrl == null) {
computeStreamsAndNextPageUrl(); computeStreamsAndNextPageUrl();
} }
return nextPageUrl; return nextPageUrl;
} }
@Override @Override
public InfoItemPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (pageUrl == null || pageUrl.isEmpty()) { if (pageUrl == null || pageUrl.isEmpty()) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
} }
@ -149,6 +149,6 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl); String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, pageUrl);
return new InfoItemPage<>(collector, nextPageUrl); return new InfoItemsPage<>(collector, nextPageUrl);
} }
} }

View File

@ -150,15 +150,15 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Nonnull @Nonnull
@Override @Override
public StreamInfoItemsCollector getInfoItems() throws ExtractionException { public InfoItemsPage<StreamInfoItem> getInitialPage() throws ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element ul = doc.select("ul[id=\"browse-items-primary\"]").first(); Element ul = doc.select("ul[id=\"browse-items-primary\"]").first();
collectStreamsFrom(collector, ul); collectStreamsFrom(collector, ul);
return collector; return new InfoItemsPage<>(collector, getNextPageUrl());
} }
@Override @Override
public InfoItemPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
if (pageUrl == null || pageUrl.isEmpty()) { if (pageUrl == null || pageUrl.isEmpty()) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
} }
@ -178,7 +178,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
final Document ajaxHtml = Jsoup.parse(ajaxJson.getString("content_html"), pageUrl); final Document ajaxHtml = Jsoup.parse(ajaxJson.getString("content_html"), pageUrl);
collectStreamsFrom(collector, ajaxHtml.select("body").first()); collectStreamsFrom(collector, ajaxHtml.select("body").first());
return new InfoItemPage<>(collector, getNextPageUrlFromAjaxPage(ajaxJson, pageUrl)); return new InfoItemsPage<>(collector, getNextPageUrlFromAjaxPage(ajaxJson, pageUrl));
} }
private String getNextPageUrlFromAjaxPage(final JsonObject ajaxJson, final String pageUrl) private String getNextPageUrlFromAjaxPage(final JsonObject ajaxJson, final String pageUrl)

View File

@ -129,15 +129,15 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Nonnull @Nonnull
@Override @Override
public StreamInfoItemsCollector getInfoItems() throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element tbody = doc.select("tbody[id=\"pl-load-more-destination\"]").first(); Element tbody = doc.select("tbody[id=\"pl-load-more-destination\"]").first();
collectStreamsFrom(collector, tbody); collectStreamsFrom(collector, tbody);
return collector; return new InfoItemsPage<>(collector, getNextPageUrl());
} }
@Override @Override
public InfoItemPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException { public InfoItemsPage<StreamInfoItem> getPage(final String pageUrl) throws IOException, ExtractionException {
if (pageUrl == null || pageUrl.isEmpty()) { if (pageUrl == null || pageUrl.isEmpty()) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null")); throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
} }
@ -156,7 +156,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
collectStreamsFrom(collector, pageHtml.select("tbody[id=\"pl-load-more-destination\"]").first()); collectStreamsFrom(collector, pageHtml.select("tbody[id=\"pl-load-more-destination\"]").first());
return new InfoItemPage<>(collector, getNextPageUrlFromAjax(pageJson, pageUrl)); return new InfoItemsPage<>(collector, getNextPageUrlFromAjax(pageJson, pageUrl));
} }
private String getNextPageUrlFromAjax(final JsonObject pageJson, final String pageUrl) private String getNextPageUrlFromAjax(final JsonObject pageJson, final String pageUrl)

View File

@ -25,7 +25,6 @@ import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element; import org.jsoup.nodes.Element;
import org.jsoup.select.Elements; import org.jsoup.select.Elements;
import org.schabi.newpipe.extractor.Downloader; import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService; import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.UrlIdHandler; import org.schabi.newpipe.extractor.UrlIdHandler;
import org.schabi.newpipe.extractor.exceptions.ExtractionException; import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -70,7 +69,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
} }
@Override @Override
public ListExtractor.InfoItemPage<StreamInfoItem> getPage(String pageUrl) { public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) {
return null; return null;
} }
@ -89,7 +88,7 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
@Nonnull @Nonnull
@Override @Override
public StreamInfoItemsCollector getInfoItems() throws ParsingException { public InfoItemsPage<StreamInfoItem> getInitialPage() throws ParsingException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId()); StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Elements uls = doc.select("ul[class*=\"expanded-shelf-content-list\"]"); Elements uls = doc.select("ul[class*=\"expanded-shelf-content-list\"]");
for(Element ul : uls) { for(Element ul : uls) {
@ -165,6 +164,6 @@ public class YoutubeTrendingExtractor extends KioskExtractor {
} }
} }
return collector; return new InfoItemsPage<>(collector, getNextPageUrl());
} }
} }

View File

@ -4,9 +4,9 @@ import org.schabi.newpipe.extractor.Info;
import org.schabi.newpipe.extractor.InfoItem; import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemsCollector; import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.stream.StreamExtractor; import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfo; import org.schabi.newpipe.extractor.stream.StreamInfo;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
@ -14,40 +14,29 @@ import java.util.List;
public class ExtractorHelper { public class ExtractorHelper {
private ExtractorHelper() {} private ExtractorHelper() {}
public static List<InfoItem> getInfoItemsOrLogError(Info info, ListExtractor extractor) { public static <T extends InfoItem> InfoItemsPage<T> getItemsPageOrLogError(Info info, ListExtractor<T> extractor) {
InfoItemsCollector collector;
try { try {
collector = extractor.getInfoItems(); InfoItemsPage<T> page = extractor.getInitialPage();
info.addAllErrors(page.getErrors());
return page;
} catch (Exception e) { } catch (Exception e) {
info.addError(e); info.addError(e);
return Collections.emptyList(); return InfoItemsPage.emptyPage();
} }
// Get from collector
return getInfoItems(info, collector);
} }
public static List<InfoItem> getRelatedVideosOrLogError(StreamInfo info, StreamExtractor extractor) { public static List<InfoItem> getRelatedVideosOrLogError(StreamInfo info, StreamExtractor extractor) {
StreamInfoItemsCollector collector;
try { try {
collector = extractor.getRelatedVideos(); InfoItemsCollector<? extends InfoItem, ?> collector = extractor.getRelatedVideos();
} catch (Exception e) {
info.addError(e);
return Collections.emptyList();
}
// Get from collector
return getInfoItems(info, collector);
}
private static List<InfoItem> getInfoItems(Info info, InfoItemsCollector collector) {
List<InfoItem> result;
try {
result = collector.getItems();
info.addAllErrors(collector.getErrors()); info.addAllErrors(collector.getErrors());
//noinspection unchecked
return (List<InfoItem>) collector.getItems();
} catch (Exception e) { } catch (Exception e) {
info.addError(e); info.addError(e);
return Collections.emptyList(); return Collections.emptyList();
} }
return result;
} }
} }

View File

@ -1,7 +1,6 @@
package org.schabi.newpipe.extractor.services; package org.schabi.newpipe.extractor.services;
import org.schabi.newpipe.extractor.InfoItem; import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItem; import org.schabi.newpipe.extractor.stream.StreamInfoItem;
@ -32,28 +31,30 @@ public final class DefaultTests {
} }
} }
public static void defaultTestRelatedItems(ListExtractor extractor, int expectedServiceId) throws Exception { public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestRelatedItems(ListExtractor<T> extractor, int expectedServiceId) throws Exception {
final InfoItemsCollector<? extends InfoItem, ?> itemsCollector = extractor.getInfoItems(); final ListExtractor.InfoItemsPage<T> page = extractor.getInitialPage();
final List<? extends InfoItem> itemsList = itemsCollector.getItems(); final List<T> itemsList = page.getItems();
List<Throwable> errors = itemsCollector.getErrors(); List<Throwable> errors = page.getErrors();
defaultTestListOfItems(expectedServiceId, itemsList, errors); defaultTestListOfItems(expectedServiceId, itemsList, errors);
return page;
} }
public static ListExtractor.InfoItemPage<? extends InfoItem> defaultTestMoreItems(ListExtractor extractor, int expectedServiceId) throws Exception { public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestMoreItems(ListExtractor<T> extractor, int expectedServiceId) throws Exception {
assertTrue("Doesn't have more items", extractor.hasNextPage()); assertTrue("Doesn't have more items", extractor.hasNextPage());
ListExtractor.InfoItemPage<? extends InfoItem> nextPage = extractor.getPage(extractor.getNextPageUrl()); ListExtractor.InfoItemsPage<T> nextPage = extractor.getPage(extractor.getNextPageUrl());
assertTrue("Next page is empty", !nextPage.getItemsList().isEmpty()); final List<T> items = nextPage.getItems();
assertTrue("Next page is empty", !items.isEmpty());
assertEmptyErrors("Next page have errors", nextPage.getErrors()); assertEmptyErrors("Next page have errors", nextPage.getErrors());
defaultTestListOfItems(expectedServiceId, nextPage.getItemsList(), nextPage.getErrors()); defaultTestListOfItems(expectedServiceId, nextPage.getItems(), nextPage.getErrors());
return nextPage; return nextPage;
} }
public static void defaultTestGetPageInNewExtractor(ListExtractor extractor, ListExtractor newExtractor, int expectedServiceId) throws Exception { public static void defaultTestGetPageInNewExtractor(ListExtractor<? extends InfoItem> extractor, ListExtractor<? extends InfoItem> newExtractor, int expectedServiceId) throws Exception {
final String nextPageUrl = extractor.getNextPageUrl(); final String nextPageUrl = extractor.getNextPageUrl();
final ListExtractor.InfoItemPage<? extends InfoItem> page = newExtractor.getPage(nextPageUrl); final ListExtractor.InfoItemsPage<? extends InfoItem> page = newExtractor.getPage(nextPageUrl);
defaultTestListOfItems(expectedServiceId, page.getItemsList(), page.getErrors()); defaultTestListOfItems(expectedServiceId, page.getItems(), page.getErrors());
} }
} }

View File

@ -4,9 +4,10 @@ import org.junit.BeforeClass;
import org.junit.Ignore; import org.junit.Ignore;
import org.junit.Test; import org.junit.Test;
import org.schabi.newpipe.Downloader; import org.schabi.newpipe.Downloader;
import org.schabi.newpipe.extractor.InfoItemsCollector; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor; import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import java.util.List; import java.util.List;
@ -47,29 +48,29 @@ public class SoundcloudChartsExtractorTest {
@Test @Test
public void testGetStreams() throws Exception { public void testGetStreams() throws Exception {
InfoItemsCollector collector = extractor.getInfoItems(); ListExtractor.InfoItemsPage<StreamInfoItem> page = extractor.getInitialPage();
if(!collector.getErrors().isEmpty()) { if(!page.getErrors().isEmpty()) {
System.err.println("----------"); System.err.println("----------");
List<Throwable> errors = collector.getErrors(); List<Throwable> errors = page.getErrors();
for(Throwable e: errors) { for(Throwable e: errors) {
e.printStackTrace(); e.printStackTrace();
System.err.println("----------"); System.err.println("----------");
} }
} }
assertTrue("no streams are received", assertTrue("no streams are received",
!collector.getItems().isEmpty() !page.getItems().isEmpty()
&& collector.getErrors().isEmpty()); && page.getErrors().isEmpty());
} }
@Test @Test
public void testGetStreamsErrors() throws Exception { public void testGetStreamsErrors() throws Exception {
assertTrue("errors during stream list extraction", extractor.getInfoItems().getErrors().isEmpty()); assertTrue("errors during stream list extraction", extractor.getInitialPage().getErrors().isEmpty());
} }
@Test @Test
public void testHasMoreStreams() throws Exception { public void testHasMoreStreams() throws Exception {
// Setup the streams // Setup the streams
extractor.getInfoItems(); extractor.getInitialPage();
assertTrue("has more streams", extractor.hasNextPage()); assertTrue("has more streams", extractor.hasNextPage());
} }
@ -80,9 +81,9 @@ public class SoundcloudChartsExtractorTest {
@Test @Test
public void testGetNextPage() throws Exception { public void testGetNextPage() throws Exception {
extractor.getInfoItems().getItems(); extractor.getInitialPage().getItems();
assertFalse("extractor has next streams", extractor.getPage(extractor.getNextPageUrl()) == null assertFalse("extractor has next streams", extractor.getPage(extractor.getNextPageUrl()) == null
|| extractor.getPage(extractor.getNextPageUrl()).getItemsList().isEmpty()); || extractor.getPage(extractor.getNextPageUrl()).getItems().isEmpty());
} }
@Test @Test

View File

@ -6,12 +6,12 @@ import org.junit.Test;
import org.junit.experimental.runners.Enclosed; import org.junit.experimental.runners.Enclosed;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
import org.schabi.newpipe.Downloader; import org.schabi.newpipe.Downloader;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.ServiceList; import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor; import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.services.BasePlaylistExtractorTest; import org.schabi.newpipe.extractor.services.BasePlaylistExtractorTest;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import static org.schabi.newpipe.extractor.ExtractorAsserts.assertIsSecureUrl; import static org.schabi.newpipe.extractor.ExtractorAsserts.assertIsSecureUrl;
@ -274,11 +274,11 @@ public class SoundcloudPlaylistExtractorTest {
@Test @Test
public void testMoreRelatedItems() throws Exception { public void testMoreRelatedItems() throws Exception {
ListExtractor.InfoItemPage<? extends InfoItem> currentPage = defaultTestMoreItems(extractor, ServiceList.SoundCloud.getServiceId()); ListExtractor.InfoItemsPage<StreamInfoItem> currentPage = defaultTestMoreItems(extractor, ServiceList.SoundCloud.getServiceId());
// Test for 2 more levels // Test for 2 more levels
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
currentPage = extractor.getPage(currentPage.getNextPageUrl()); currentPage = extractor.getPage(currentPage.getNextPageUrl());
defaultTestListOfItems(SoundCloud.getServiceId(), currentPage.getItemsList(), currentPage.getErrors()); defaultTestListOfItems(SoundCloud.getServiceId(), currentPage.getItems(), currentPage.getErrors());
} }
} }

View File

@ -5,12 +5,12 @@ import org.junit.Test;
import org.junit.experimental.runners.Enclosed; import org.junit.experimental.runners.Enclosed;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
import org.schabi.newpipe.Downloader; import org.schabi.newpipe.Downloader;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.ListExtractor; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.ServiceList; import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor; import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.services.BasePlaylistExtractorTest; import org.schabi.newpipe.extractor.services.BasePlaylistExtractorTest;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
@ -181,11 +181,11 @@ public class YoutubePlaylistExtractorTest {
@Test @Test
public void testMoreRelatedItems() throws Exception { public void testMoreRelatedItems() throws Exception {
ListExtractor.InfoItemPage<? extends InfoItem> currentPage = defaultTestMoreItems(extractor, ServiceList.YouTube.getServiceId()); ListExtractor.InfoItemsPage<StreamInfoItem> currentPage = defaultTestMoreItems(extractor, ServiceList.YouTube.getServiceId());
// Test for 2 more levels // Test for 2 more levels
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
currentPage = extractor.getPage(currentPage.getNextPageUrl()); currentPage = extractor.getPage(currentPage.getNextPageUrl());
defaultTestListOfItems(YouTube.getServiceId(), currentPage.getItemsList(), currentPage.getErrors()); defaultTestListOfItems(YouTube.getServiceId(), currentPage.getItems(), currentPage.getErrors());
} }
} }

View File

@ -23,8 +23,9 @@ package org.schabi.newpipe.extractor.services.youtube;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.schabi.newpipe.Downloader; import org.schabi.newpipe.Downloader;
import org.schabi.newpipe.extractor.InfoItemsCollector; import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.NewPipe; import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.utils.Utils; import org.schabi.newpipe.extractor.utils.Utils;
import static junit.framework.TestCase.assertFalse; import static junit.framework.TestCase.assertFalse;
@ -66,27 +67,27 @@ public class YoutubeTrendingExtractorTest {
@Test @Test
public void testGetStreamsQuantity() throws Exception { public void testGetStreamsQuantity() throws Exception {
InfoItemsCollector collector = extractor.getInfoItems(); ListExtractor.InfoItemsPage<StreamInfoItem> page = extractor.getInitialPage();
Utils.printErrors(collector.getErrors()); Utils.printErrors(page.getErrors());
assertTrue("no streams are received", collector.getItems().size() >= 20); assertTrue("no streams are received", page.getItems().size() >= 20);
} }
@Test @Test
public void testGetStreamsErrors() throws Exception { public void testGetStreamsErrors() throws Exception {
assertEmptyErrors("errors during stream list extraction", extractor.getInfoItems().getErrors()); assertEmptyErrors("errors during stream list extraction", extractor.getInitialPage().getErrors());
} }
@Test @Test
public void testHasMoreStreams() throws Exception { public void testHasMoreStreams() throws Exception {
// Setup the streams // Setup the streams
extractor.getInfoItems(); extractor.getInitialPage();
assertFalse("has more streams", extractor.hasNextPage()); assertFalse("has more streams", extractor.hasNextPage());
} }
@Test @Test
public void testGetNextPage() { public void testGetNextPage() {
assertTrue("extractor has next streams", extractor.getPage(extractor.getNextPageUrl()) == null assertTrue("extractor has next streams", extractor.getPage(extractor.getNextPageUrl()) == null
|| extractor.getPage(extractor.getNextPageUrl()).getItemsList().isEmpty()); || extractor.getPage(extractor.getNextPageUrl()).getItems().isEmpty());
} }
@Test @Test