Merge branch 'dev' into dev

fynngodau 2020-03-17 18:03:14 +01:00 committed by GitHub
commit b78f788017
141 changed files with 3413 additions and 2477 deletions

View File

@ -21,7 +21,7 @@ To test changes quickly you can build the library locally. Using the local Maven
2. It's _recommended_ that you change the `version` of this library (e.g. `LOCAL_SNAPSHOT`).
3. Run gradle's `install` task to deploy this library to your local repository (using the wrapper, present in the root of this project: `./gradlew install`)
4. Change the dependency version used in your project to match the one you chose in step 2 (`implementation 'com.github.TeamNewPipe:NewPipeExtractor:LOCAL_SNAPSHOT'`)
> Tip for Android Studio users: After you make changes and run the `install` task, use the menu option `File → "Sync with File System"` to refresh the library in your project.
## Supported sites

View File

@ -5,7 +5,7 @@ allprojects {
sourceCompatibility = 1.7
targetCompatibility = 1.7
version 'v0.18.0'
version 'v0.18.6'
group 'com.github.TeamNewPipe'
repositories {

View File

@ -1,10 +1,5 @@
package org.schabi.newpipe.extractor;
import java.io.IOException;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
@ -13,7 +8,11 @@ import org.schabi.newpipe.extractor.localization.ContentCountry;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
public abstract class Extractor{
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
public abstract class Extractor {
/**
* {@link StreamingService} currently related to this extractor.<br>
* Useful for getting other things from a service (like the url handlers for cleaning/accepting/get id from urls).
@ -21,19 +20,21 @@ public abstract class Extractor{
private final StreamingService service;
private final LinkHandler linkHandler;
@Nullable private Localization forcedLocalization = null;
@Nullable private ContentCountry forcedContentCountry = null;
@Nullable
private Localization forcedLocalization = null;
@Nullable
private ContentCountry forcedContentCountry = null;
private boolean pageFetched = false;
private final Downloader downloader;
public Extractor(final StreamingService service, final LinkHandler linkHandler) {
if(service == null) throw new NullPointerException("service is null");
if(linkHandler == null) throw new NullPointerException("LinkHandler is null");
if (service == null) throw new NullPointerException("service is null");
if (linkHandler == null) throw new NullPointerException("LinkHandler is null");
this.service = service;
this.linkHandler = linkHandler;
this.downloader = NewPipe.getDownloader();
if(downloader == null) throw new NullPointerException("downloader is null");
if (downloader == null) throw new NullPointerException("downloader is null");
}
/**
@ -46,11 +47,12 @@ public abstract class Extractor{
/**
* Fetch the current page.
* @throws IOException if the page can not be loaded
*
* @throws IOException if the page can not be loaded
* @throws ExtractionException if the pages content is not understood
*/
public void fetchPage() throws IOException, ExtractionException {
if(pageFetched) return;
if (pageFetched) return;
onFetchPage(downloader);
pageFetched = true;
}
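
The guard above makes `fetchPage()` idempotent. A minimal usage sketch (the wrapper class and method name are invented here purely for illustration):

```java
import java.io.IOException;
import org.schabi.newpipe.extractor.Extractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;

// Illustration only: fetch the page once, then read extracted fields.
final class ExtractorUsageSketch {
    static String extractName(final Extractor extractor)
            throws IOException, ExtractionException, ParsingException {
        extractor.fetchPage();
        extractor.fetchPage(); // no-op: pageFetched is already true
        return extractor.getName();
    }
}
```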
@ -65,8 +67,9 @@ public abstract class Extractor{
/**
* Fetch the current page.
*
* @param downloader the download to use
* @throws IOException if the page can not be loaded
* @throws IOException if the page can not be loaded
* @throws ExtractionException if the pages content is not understood
*/
public abstract void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException;
@ -78,6 +81,7 @@ public abstract class Extractor{
/**
* Get the name
*
* @return the name
* @throws ParsingException if the name cannot be extracted
*/
@ -93,10 +97,10 @@ public abstract class Extractor{
public String getUrl() throws ParsingException {
return linkHandler.getUrl();
}
@Nonnull
public String getBaseUrl() throws ParsingException {
return linkHandler.getBaseUrl();
return linkHandler.getBaseUrl();
}
@Nonnull

View File

@ -27,7 +27,7 @@ import java.util.List;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/
public abstract class InfoItemsCollector<I extends InfoItem, E extends InfoItemExtractor> implements Collector<I,E> {
public abstract class InfoItemsCollector<I extends InfoItem, E extends InfoItemExtractor> implements Collector<I, E> {
private final List<I> itemList = new ArrayList<>();
private final List<Throwable> errors = new ArrayList<>();

View File

@ -115,26 +115,28 @@ public enum MediaFormat {
}
/**
* Get the media format by it's id.
* Get the media format by its id.
*
* @param id the id
* @return the id of the media format or null.
*/
public static MediaFormat getFormatById(int id) {
for (MediaFormat vf: values()) {
for (MediaFormat vf : values()) {
if (vf.id == id) return vf;
}
return null;
}
public static MediaFormat getFromSuffix(String suffix) {
for (MediaFormat vf: values()) {
for (MediaFormat vf : values()) {
if (vf.suffix.equals(suffix)) return vf;
}
return null;
}
/**
* Get the name of the format
*
* @return the name of the format
*/
public String getName() {
@ -143,6 +145,7 @@ public enum MediaFormat {
/**
* Get the filename extension
*
* @return the filename extension
*/
public String getSuffix() {
@ -151,10 +154,11 @@ public enum MediaFormat {
/**
* Get the mime type
*
* @return the mime type
*/
public String getMimeType() {
return mimeType;
}
}

View File

@ -1,15 +1,15 @@
package org.schabi.newpipe.extractor;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.schabi.newpipe.extractor.services.bandcamp.BandcampService;
import org.schabi.newpipe.extractor.services.media_ccc.MediaCCCService;
import org.schabi.newpipe.extractor.services.peertube.PeertubeService;
import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudService;
import org.schabi.newpipe.extractor.services.youtube.YoutubeService;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/*
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
* ServiceList.java is part of NewPipe.

View File

@ -1,20 +1,12 @@
package org.schabi.newpipe.extractor;
import java.util.Collections;
import java.util.List;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.feed.FeedExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskList;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.*;
import org.schabi.newpipe.extractor.localization.ContentCountry;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
@ -26,6 +18,8 @@ import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;
/*
* Copyright (C) Christian Schabesberger 2018 <chris.schabesberger@mailbox.org>
@ -269,7 +263,7 @@ public abstract class StreamingService {
public CommentsExtractor getCommentsExtractor(String url) throws ExtractionException {
ListLinkHandlerFactory llhf = getCommentsLHFactory();
if(null == llhf) {
if (llhf == null) {
return null;
}
return getCommentsExtractor(llhf.fromUrl(url));

View File

@ -33,7 +33,7 @@ public class ChannelInfoItemsCollector extends InfoItemsCollector<ChannelInfoIte
// important information
int serviceId = getServiceId();
String name = extractor.getName();
String url = extractor.getUrl();
String url = extractor.getUrl();
ChannelInfoItem resultItem = new ChannelInfoItem(serviceId, url, name);

View File

@ -1,7 +1,5 @@
package org.schabi.newpipe.extractor.comments;
import java.io.IOException;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.ListInfo;
import org.schabi.newpipe.extractor.NewPipe;
@ -10,20 +8,21 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.utils.ExtractorHelper;
public class CommentsInfo extends ListInfo<CommentsInfoItem>{
import java.io.IOException;
private CommentsInfo(int serviceId, ListLinkHandler listUrlIdHandler, String name) {
super(serviceId, listUrlIdHandler, name);
// TODO Auto-generated constructor stub
}
public static CommentsInfo getInfo(String url) throws IOException, ExtractionException {
public class CommentsInfo extends ListInfo<CommentsInfoItem> {
private CommentsInfo(int serviceId, ListLinkHandler listUrlIdHandler, String name) {
super(serviceId, listUrlIdHandler, name);
}
public static CommentsInfo getInfo(String url) throws IOException, ExtractionException {
return getInfo(NewPipe.getServiceByUrl(url), url);
}
public static CommentsInfo getInfo(StreamingService serviceByUrl, String url) throws ExtractionException, IOException {
return getInfo(serviceByUrl.getCommentsExtractor(url));
}
public static CommentsInfo getInfo(StreamingService serviceByUrl, String url) throws ExtractionException, IOException {
return getInfo(serviceByUrl.getCommentsExtractor(url));
}
private static CommentsInfo getInfo(CommentsExtractor commentsExtractor) throws IOException, ExtractionException {
// for services which do not have a comments extractor
@ -44,21 +43,21 @@ public class CommentsInfo extends ListInfo<CommentsInfoItem>{
return commentsInfo;
}
public static InfoItemsPage<CommentsInfoItem> getMoreItems(CommentsInfo commentsInfo, String pageUrl)
throws ExtractionException, IOException {
return getMoreItems(NewPipe.getService(commentsInfo.getServiceId()), commentsInfo, pageUrl);
}
public static InfoItemsPage<CommentsInfoItem> getMoreItems(StreamingService service, CommentsInfo commentsInfo,
String pageUrl) throws IOException, ExtractionException {
String pageUrl) throws IOException, ExtractionException {
if (null == commentsInfo.getCommentsExtractor()) {
commentsInfo.setCommentsExtractor(service.getCommentsExtractor(commentsInfo.getUrl()));
commentsInfo.getCommentsExtractor().fetchPage();
}
return commentsInfo.getCommentsExtractor().getPage(pageUrl);
}
private transient CommentsExtractor commentsExtractor;
public CommentsExtractor getCommentsExtractor() {

View File

@ -15,12 +15,16 @@ public class Response {
private final Map<String, List<String>> responseHeaders;
private final String responseBody;
public Response(int responseCode, String responseMessage, Map<String, List<String>> responseHeaders, @Nullable String responseBody) {
private final String latestUrl;
public Response(int responseCode, String responseMessage, Map<String, List<String>> responseHeaders,
@Nullable String responseBody, @Nullable String latestUrl) {
this.responseCode = responseCode;
this.responseMessage = responseMessage;
this.responseHeaders = responseHeaders != null ? responseHeaders : Collections.<String, List<String>>emptyMap();
this.responseBody = responseBody == null ? "" : responseBody;
this.latestUrl = latestUrl;
}
public int responseCode() {
@ -40,6 +44,16 @@ public class Response {
return responseBody;
}
/**
* Used for detecting a possible redirection, limited to the latest one.
*
* @return latest url known right before this response object was created
*/
@Nonnull
public String latestUrl() {
return latestUrl;
}
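
To illustrate the new `latestUrl` field, a hedged sketch of how a caller might detect a redirect; the helper class below is hypothetical and not part of this commit:

```java
import java.io.IOException;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;

// Hypothetical helper: the request was redirected if the url we asked for
// differs from the latest url the Response knows about.
final class RedirectCheckSketch {
    static boolean wasRedirected(final Downloader downloader, final String requestUrl)
            throws IOException, ReCaptchaException {
        final Response response = downloader.get(requestUrl);
        return !requestUrl.equals(response.latestUrl());
    }
}
```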
/*//////////////////////////////////////////////////////////////////////////
// Utils
//////////////////////////////////////////////////////////////////////////*/
@ -54,7 +68,8 @@ public class Response {
@Nullable
public String getHeader(String name) {
for (Map.Entry<String, List<String>> headerEntry : responseHeaders.entrySet()) {
if (headerEntry.getKey().equalsIgnoreCase(name)) {
final String key = headerEntry.getKey();
if (key != null && key.equalsIgnoreCase(name)) {
if (headerEntry.getValue().size() > 0) {
return headerEntry.getValue().get(0);
}

View File

@ -25,7 +25,6 @@ import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import javax.annotation.Nonnull;

View File

@ -13,27 +13,30 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Set;
public class KioskList {
public class KioskList {
public interface KioskExtractorFactory {
KioskExtractor createNewKiosk(final StreamingService streamingService,
final String url,
final String kioskId)
throws ExtractionException, IOException;
throws ExtractionException, IOException;
}
private final StreamingService service;
private final HashMap<String, KioskEntry> kioskList = new HashMap<>();
private String defaultKiosk = null;
@Nullable private Localization forcedLocalization;
@Nullable private ContentCountry forcedContentCountry;
@Nullable
private Localization forcedLocalization;
@Nullable
private ContentCountry forcedContentCountry;
private class KioskEntry {
public KioskEntry(KioskExtractorFactory ef, ListLinkHandlerFactory h) {
extractorFactory = ef;
handlerFactory = h;
}
final KioskExtractorFactory extractorFactory;
final ListLinkHandlerFactory handlerFactory;
}
@ -43,8 +46,8 @@ public class KioskList {
}
public void addKioskEntry(KioskExtractorFactory extractorFactory, ListLinkHandlerFactory handlerFactory, String id)
throws Exception {
if(kioskList.get(id) != null) {
throws Exception {
if (kioskList.get(id) != null) {
throw new Exception("Kiosk with type " + id + " already exists.");
}
kioskList.put(id, new KioskEntry(extractorFactory, handlerFactory));
@ -66,10 +69,10 @@ public class KioskList {
public KioskExtractor getDefaultKioskExtractor(String nextPageUrl, Localization localization)
throws ExtractionException, IOException {
if(defaultKiosk != null && !defaultKiosk.equals("")) {
if (defaultKiosk != null && !defaultKiosk.equals("")) {
return getExtractorById(defaultKiosk, nextPageUrl, localization);
} else {
if(!kioskList.isEmpty()) {
if (!kioskList.isEmpty()) {
// if not set get any entry
Object[] keySet = kioskList.keySet().toArray();
return getExtractorById(keySet[0].toString(), nextPageUrl, localization);
@ -91,7 +94,7 @@ public class KioskList {
public KioskExtractor getExtractorById(String kioskId, String nextPageUrl, Localization localization)
throws ExtractionException, IOException {
KioskEntry ke = kioskList.get(kioskId);
if(ke == null) {
if (ke == null) {
throw new ExtractionException("No kiosk found with the type: " + kioskId);
} else {
final KioskExtractor kioskExtractor = ke.extractorFactory.createNewKiosk(service,
@ -109,15 +112,15 @@ public class KioskList {
}
public KioskExtractor getExtractorByUrl(String url, String nextPageUrl)
throws ExtractionException, IOException{
throws ExtractionException, IOException {
return getExtractorByUrl(url, nextPageUrl, NewPipe.getPreferredLocalization());
}
public KioskExtractor getExtractorByUrl(String url, String nextPageUrl, Localization localization)
throws ExtractionException, IOException {
for(Map.Entry<String, KioskEntry> e : kioskList.entrySet()) {
for (Map.Entry<String, KioskEntry> e : kioskList.entrySet()) {
KioskEntry ke = e.getValue();
if(ke.handlerFactory.acceptUrl(url)) {
if (ke.handlerFactory.acceptUrl(url)) {
return getExtractorById(ke.handlerFactory.getId(url), nextPageUrl, localization);
}
}

View File

@ -1,10 +1,10 @@
package org.schabi.newpipe.extractor.linkhandler;
import java.io.Serializable;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.utils.Utils;
import java.io.Serializable;
public class LinkHandler implements Serializable {
protected final String originalUrl;
protected final String url;
@ -31,8 +31,8 @@ public class LinkHandler implements Serializable {
public String getId() {
return id;
}
public String getBaseUrl() throws ParsingException {
return Utils.getBaseUrl(url);
return Utils.getBaseUrl(url);
}
}

View File

@ -35,9 +35,9 @@ public abstract class LinkHandlerFactory {
public abstract boolean onAcceptUrl(final String url) throws ParsingException;
public String getUrl(String id, String baseUrl) throws ParsingException {
return getUrl(id);
return getUrl(id);
}
///////////////////////////////////
// Logic
///////////////////////////////////
@ -47,7 +47,7 @@ public abstract class LinkHandlerFactory {
final String baseUrl = Utils.getBaseUrl(url);
return fromUrl(url, baseUrl);
}
public LinkHandler fromUrl(String url, String baseUrl) throws ParsingException {
if (url == null) throw new IllegalArgumentException("url can not be null");
if (!acceptUrl(url)) {
@ -55,9 +55,9 @@ public abstract class LinkHandlerFactory {
}
final String id = getId(url);
return new LinkHandler(url, getUrl(id,baseUrl), id);
return new LinkHandler(url, getUrl(id, baseUrl), id);
}
public LinkHandler fromId(String id) throws ParsingException {
if (id == null) throw new IllegalArgumentException("id can not be null");
final String url = getUrl(id);
@ -82,5 +82,5 @@ public abstract class LinkHandlerFactory {
throw fe;
}
}
}

View File

@ -1,29 +1,35 @@
package org.schabi.newpipe.extractor.linkhandler;
import java.util.ArrayList;
import java.util.List;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.utils.Utils;
import java.util.ArrayList;
import java.util.List;
public abstract class ListLinkHandlerFactory extends LinkHandlerFactory {
///////////////////////////////////
// To Override
///////////////////////////////////
public List<String> getContentFilter(String url) throws ParsingException { return new ArrayList<>(0);}
public String getSortFilter(String url) throws ParsingException {return ""; }
public List<String> getContentFilter(String url) throws ParsingException {
return new ArrayList<>(0);
}
public String getSortFilter(String url) throws ParsingException {
return "";
}
public abstract String getUrl(String id, List<String> contentFilter, String sortFilter) throws ParsingException;
public String getUrl(String id, List<String> contentFilter, String sortFilter, String baseUrl) throws ParsingException {
return getUrl(id, contentFilter, sortFilter);
return getUrl(id, contentFilter, sortFilter);
}
///////////////////////////////////
// Logic
///////////////////////////////////
@Override
public ListLinkHandler fromUrl(String url) throws ParsingException {
String baseUrl = Utils.getBaseUrl(url);
@ -32,7 +38,7 @@ public abstract class ListLinkHandlerFactory extends LinkHandlerFactory {
@Override
public ListLinkHandler fromUrl(String url, String baseUrl) throws ParsingException {
if(url == null) throw new IllegalArgumentException("url may not be null");
if (url == null) throw new IllegalArgumentException("url may not be null");
return new ListLinkHandler(super.fromUrl(url, baseUrl), getContentFilter(url), getSortFilter(url));
}
@ -41,7 +47,7 @@ public abstract class ListLinkHandlerFactory extends LinkHandlerFactory {
public ListLinkHandler fromId(String id) throws ParsingException {
return new ListLinkHandler(super.fromId(id), new ArrayList<String>(0), "");
}
@Override
public ListLinkHandler fromId(String id, String baseUrl) throws ParsingException {
return new ListLinkHandler(super.fromId(id, baseUrl), new ArrayList<String>(0), "");
@ -53,7 +59,7 @@ public abstract class ListLinkHandlerFactory extends LinkHandlerFactory {
final String url = getUrl(id, contentFilters, sortFilter);
return new ListLinkHandler(url, url, id, contentFilters, sortFilter);
}
public ListLinkHandler fromQuery(String id,
List<String> contentFilters,
String sortFilter, String baseUrl) throws ParsingException {
@ -61,10 +67,11 @@ public abstract class ListLinkHandlerFactory extends LinkHandlerFactory {
return new ListLinkHandler(url, url, id, contentFilters, sortFilter);
}
/**
* For makeing ListLinkHandlerFactory compatible with LinkHandlerFactory we need to override this,
* For making ListLinkHandlerFactory compatible with LinkHandlerFactory we need to override this,
* however it should not be overridden by the actual implementation.
*
* @param id
* @return the url coresponding to id without any filters applied
*/
@ -76,7 +83,7 @@ public abstract class ListLinkHandlerFactory extends LinkHandlerFactory {
public String getUrl(String id, String baseUrl) throws ParsingException {
return getUrl(id, new ArrayList<String>(0), "", baseUrl);
}
/**
* Will returns content filter the corresponding extractor can handle like "channels", "videos", "music", etc.
*

View File

@ -24,6 +24,7 @@ public class SearchQueryHandler extends ListLinkHandler {
/**
* Returns the search string. Since ListQIHandler is based on ListLinkHandler
* getSearchString() is equivalent to calling getId().
*
* @return the search string
*/
public String getSearchString() {

View File

@ -13,14 +13,19 @@ public abstract class SearchQueryHandlerFactory extends ListLinkHandlerFactory {
@Override
public abstract String getUrl(String querry, List<String> contentFilter, String sortFilter) throws ParsingException;
public String getSearchString(String url) { return "";}
public String getSearchString(String url) {
return "";
}
///////////////////////////////////
// Logic
///////////////////////////////////
@Override
public String getId(String url) { return getSearchString(url); }
public String getId(String url) {
return getSearchString(url);
}
@Override
public SearchQueryHandler fromQuery(String querry,
@ -34,10 +39,13 @@ public abstract class SearchQueryHandlerFactory extends ListLinkHandlerFactory {
}
/**
* It's not mandatorry for NewPipe to handle the Url
* It's not mandatory for NewPipe to handle the Url
*
* @param url
* @return
*/
@Override
public boolean onAcceptUrl(String url) { return false; }
public boolean onAcceptUrl(String url) {
return false;
}
}

View File

@ -3,7 +3,10 @@ package org.schabi.newpipe.extractor.localization;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.Serializable;
import java.util.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
public class Localization implements Serializable {
public static final Localization DEFAULT = new Localization("en", "GB");

View File

@ -21,8 +21,9 @@ public class TimeAgoParser {
/**
* Creates a helper to parse upload dates in the format '2 days ago'.
* <p>
* Instantiate a new {@link TimeAgoParser} every time you extract a new batch of items.
* Instantiate a new {@link TimeAgoParser} every time you extract a new batch of items.
* </p>
*
* @param patternsHolder An object that holds the "time ago" patterns, special cases, and the language word separator.
*/
public TimeAgoParser(PatternsHolder patternsHolder) {
@ -164,6 +165,7 @@ public class TimeAgoParser {
/**
* Marks the time as approximated by setting minutes, seconds and milliseconds to 0.
*
* @param calendarTime Time to be marked as approximated
*/
private void markApproximatedTime(Calendar calendarTime) {

View File

@ -5,7 +5,6 @@ import org.schabi.newpipe.extractor.InfoItemExtractor;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemExtractor;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemsCollector;
@ -34,7 +33,7 @@ import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
/**
* Collector for search results
*
* <p>
* This collector can handle the following extractor types:
* <ul>
* <li>{@link StreamInfoItemExtractor}</li>
@ -59,11 +58,11 @@ public class InfoItemsSearchCollector extends InfoItemsCollector<InfoItem, InfoI
@Override
public InfoItem extract(InfoItemExtractor extractor) throws ParsingException {
// Use the corresponding collector for each item extractor type
if(extractor instanceof StreamInfoItemExtractor) {
if (extractor instanceof StreamInfoItemExtractor) {
return streamCollector.extract((StreamInfoItemExtractor) extractor);
} else if(extractor instanceof ChannelInfoItemExtractor) {
} else if (extractor instanceof ChannelInfoItemExtractor) {
return userCollector.extract((ChannelInfoItemExtractor) extractor);
} else if(extractor instanceof PlaylistInfoItemExtractor) {
} else if (extractor instanceof PlaylistInfoItemExtractor) {
return playlistCollector.extract((PlaylistInfoItemExtractor) extractor);
} else {
throw new IllegalArgumentException("Invalid extractor type: " + extractor);

View File

@ -55,7 +55,7 @@ public class MediaCCCConferenceExtractor extends ChannelExtractor {
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
JsonArray events = conferenceData.getArray("events");
for(int i = 0; i < events.size(); i++) {
for (int i = 0; i < events.size(); i++) {
collector.commit(new MediaCCCStreamInfoItemExtractor(events.getObject(i)));
}
return new InfoItemsPage<>(collector, null);

View File

@ -32,7 +32,7 @@ public class MediaCCCConferenceKiosk extends KioskExtractor<ChannelInfoItem> {
public InfoItemsPage<ChannelInfoItem> getInitialPage() throws IOException, ExtractionException {
JsonArray conferences = doc.getArray("conferences");
ChannelInfoItemsCollector collector = new ChannelInfoItemsCollector(getServiceId());
for(int i = 0; i < conferences.size(); i++) {
for (int i = 0; i < conferences.size(); i++) {
collector.commit(new MediaCCCConferenceInfoItemExtractor(conferences.getObject(i)));
}

View File

@ -50,7 +50,7 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
InfoItemsSearchCollector searchItems = getInfoItemSearchCollector();
searchItems.reset();
if(getLinkHandler().getContentFilters().contains(CONFERENCES)
if (getLinkHandler().getContentFilters().contains(CONFERENCES)
|| getLinkHandler().getContentFilters().contains(ALL)
|| getLinkHandler().getContentFilters().isEmpty()) {
searchConferences(getSearchString(),
@ -58,7 +58,7 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
searchItems);
}
if(getLinkHandler().getContentFilters().contains(EVENTS)
if (getLinkHandler().getContentFilters().contains(EVENTS)
|| getLinkHandler().getContentFilters().contains(ALL)
|| getLinkHandler().getContentFilters().isEmpty()) {
JsonArray events = doc.getArray("events");
@ -82,8 +82,8 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
if(getLinkHandler().getContentFilters().contains(EVENTS)
|| getLinkHandler().getContentFilters().contains(ALL)
if (getLinkHandler().getContentFilters().contains(EVENTS)
|| getLinkHandler().getContentFilters().contains(ALL)
|| getLinkHandler().getContentFilters().isEmpty()) {
final String site;
final String url = getUrl();
@ -94,17 +94,17 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
throw new ExtractionException("Could not parse json.", jpe);
}
}
if(getLinkHandler().getContentFilters().contains(CONFERENCES)
if (getLinkHandler().getContentFilters().contains(CONFERENCES)
|| getLinkHandler().getContentFilters().contains(ALL)
|| getLinkHandler().getContentFilters().isEmpty())
conferenceKiosk.fetchPage();
conferenceKiosk.fetchPage();
}
private void searchConferences(String searchString,
List<ChannelInfoItem> channelItems,
InfoItemsSearchCollector collector) {
for(final ChannelInfoItem item : channelItems) {
if(item.getName().toUpperCase().contains(
List<ChannelInfoItem> channelItems,
InfoItemsSearchCollector collector) {
for (final ChannelInfoItem item : channelItems) {
if (item.getName().toUpperCase().contains(
searchString.toUpperCase())) {
collector.commit(new ChannelInfoItemExtractor() {
@Override

View File

@ -16,7 +16,9 @@ import org.schabi.newpipe.extractor.stream.*;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
public class MediaCCCStreamExtractor extends StreamExtractor {
@ -47,8 +49,8 @@ public class MediaCCCStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getDescription() throws ParsingException {
return data.getString("description");
public Description getDescription() throws ParsingException {
return new Description(data.getString("description"), Description.PLAIN_TEXT);
}
@Override
@ -103,30 +105,30 @@ public class MediaCCCStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public String getDashMpdUrl() throws ParsingException {
return null;
return "";
}
@Nonnull
@Override
public String getHlsUrl() throws ParsingException {
return null;
return "";
}
@Override
public List<AudioStream> getAudioStreams() throws IOException, ExtractionException {
final JsonArray recordings = data.getArray("recordings");
final List<AudioStream> audioStreams = new ArrayList<>();
for(int i = 0; i < recordings.size(); i++) {
for (int i = 0; i < recordings.size(); i++) {
final JsonObject recording = recordings.getObject(i);
final String mimeType = recording.getString("mime_type");
if(mimeType.startsWith("audio")) {
if (mimeType.startsWith("audio")) {
//first we need to resolve the actual video data from CDN
final MediaFormat mediaFormat;
if(mimeType.endsWith("opus")) {
if (mimeType.endsWith("opus")) {
mediaFormat = MediaFormat.OPUS;
} else if(mimeType.endsWith("mpeg")) {
} else if (mimeType.endsWith("mpeg")) {
mediaFormat = MediaFormat.MP3;
} else if(mimeType.endsWith("ogg")){
} else if (mimeType.endsWith("ogg")) {
mediaFormat = MediaFormat.OGG;
} else {
throw new ExtractionException("Unknown media format: " + mimeType);
@ -142,16 +144,16 @@ public class MediaCCCStreamExtractor extends StreamExtractor {
public List<VideoStream> getVideoStreams() throws IOException, ExtractionException {
final JsonArray recordings = data.getArray("recordings");
final List<VideoStream> videoStreams = new ArrayList<>();
for(int i = 0; i < recordings.size(); i++) {
for (int i = 0; i < recordings.size(); i++) {
final JsonObject recording = recordings.getObject(i);
final String mimeType = recording.getString("mime_type");
if(mimeType.startsWith("video")) {
if (mimeType.startsWith("video")) {
//first we need to resolve the actual video data from CDN
final MediaFormat mediaFormat;
if(mimeType.endsWith("webm")) {
if (mimeType.endsWith("webm")) {
mediaFormat = MediaFormat.WEBM;
} else if(mimeType.endsWith("mp4")) {
} else if (mimeType.endsWith("mp4")) {
mediaFormat = MediaFormat.MPEG_4;
} else {
throw new ExtractionException("Unknown media format: " + mimeType);
@ -172,13 +174,13 @@ public class MediaCCCStreamExtractor extends StreamExtractor {
@Nonnull
@Override
public List<SubtitlesStream> getSubtitlesDefault() throws IOException, ExtractionException {
return null;
return Collections.emptyList();
}
@Nonnull
@Override
public List<SubtitlesStream> getSubtitles(MediaFormat format) throws IOException, ExtractionException {
return null;
public List<SubtitlesStream> getSubtitles(final MediaFormat format) throws IOException, ExtractionException {
return Collections.emptyList();
}
@Override
@ -211,7 +213,6 @@ public class MediaCCCStreamExtractor extends StreamExtractor {
} catch (JsonParserException jpe) {
throw new ExtractionException("Could not parse json returned by url: " + getLinkHandler().getUrl(), jpe);
}
}
@Nonnull
@ -225,4 +226,41 @@ public class MediaCCCStreamExtractor extends StreamExtractor {
public String getOriginalUrl() throws ParsingException {
return data.getString("frontend_link");
}
@Override
public String getHost() throws ParsingException {
return "";
}
@Override
public String getPrivacy() throws ParsingException {
return "";
}
@Override
public String getCategory() throws ParsingException {
return "";
}
@Override
public String getLicence() throws ParsingException {
return "";
}
@Override
public Locale getLanguageInfo() throws ParsingException {
return null;
}
@Nonnull
@Override
public List<String> getTags() throws ParsingException {
return new ArrayList<>();
}
@Nonnull
@Override
public String getSupportInfo() throws ParsingException {
return "";
}
}

View File

@ -15,18 +15,23 @@ public class MediaCCCConferenceLinkHandlerFactory extends ListLinkHandlerFactory
@Override
public String getId(String url) throws ParsingException {
if(url.startsWith("https://api.media.ccc.de/public/conferences/")) {
if (url.startsWith("https://api.media.ccc.de/public/conferences/")) {
return url.replace("https://api.media.ccc.de/public/conferences/", "");
} else if(url.startsWith("https://media.ccc.de/c/")) {
} else if (url.startsWith("https://media.ccc.de/c/")) {
return Parser.matchGroup1("https://media.ccc.de/c/([^?#]*)", url);
} else {
throw new ParsingException("Could not get id from url: " + url);
} else if (url.startsWith("https://media.ccc.de/b/")) {
return Parser.matchGroup1("https://media.ccc.de/b/([^?#]*)", url);
}
throw new ParsingException("Could not get id from url: " + url);
}
@Override
public boolean onAcceptUrl(String url) throws ParsingException {
return url.startsWith("https://api.media.ccc.de/public/conferences/")
|| url.startsWith("https://media.ccc.de/c/");
try {
getId(url);
return true;
} catch (ParsingException e) {
return false;
}
}
}

View File

@ -1,7 +1,6 @@
package org.schabi.newpipe.extractor.services.media_ccc.linkHandler;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import java.util.List;

View File

@ -15,7 +15,7 @@ public class MediaCCCSearchQueryHandlerFactory extends SearchQueryHandlerFactory
@Override
public String[] getAvailableContentFilter() {
return new String[] {
return new String[]{
ALL,
CONFERENCES,
EVENTS

View File

@ -1,16 +1,39 @@
package org.schabi.newpipe.extractor.services.media_ccc.linkHandler;
import org.schabi.newpipe.extractor.exceptions.FoundAdException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Utils;
import java.net.MalformedURLException;
import java.net.URL;
public class MediaCCCStreamLinkHandlerFactory extends LinkHandlerFactory {
@Override
public String getId(String url) throws ParsingException {
if(url.startsWith("https://api.media.ccc.de/public/events/") &&
!url.contains("?q=")) {
return url.replace("https://api.media.ccc.de/public/events/", "");
public String getId(String urlString) throws ParsingException {
if (urlString.startsWith("https://api.media.ccc.de/public/events/") &&
!urlString.contains("?q=")) {
return urlString.substring(39); //remove api/public/events part
}
URL url;
try {
url = Utils.stringToURL(urlString);
} catch (MalformedURLException e) {
throw new IllegalArgumentException("The given URL is not valid");
}
String path = url.getPath();
// remove leading "/" of URL-path if URL-path is given
if (!path.isEmpty()) {
path = path.substring(1);
}
if (path.startsWith("v/")) {
return path.substring(2);
}
throw new ParsingException("Could not get id from url: " + url);
}
@ -21,7 +44,11 @@ public class MediaCCCStreamLinkHandlerFactory extends LinkHandlerFactory {
@Override
public boolean onAcceptUrl(String url) throws ParsingException {
return url.startsWith("https://api.media.ccc.de/public/events/") &&
!url.contains("?q=");
try {
getId(url);
return true;
} catch (ParsingException e) {
return false;
}
}
}

View File

@ -1,7 +1,8 @@
package org.schabi.newpipe.extractor.services.peertube;
import java.io.IOException;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -10,22 +11,20 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import java.io.IOException;
public class PeertubeInstance {
private final String url;
private String name;
public static final PeertubeInstance defaultInstance = new PeertubeInstance("https://framatube.org", "FramaTube");
public PeertubeInstance(String url) {
this.url = url;
this.name = "PeerTube";
}
public PeertubeInstance(String url , String name) {
public PeertubeInstance(String url, String name) {
this.url = url;
this.name = name;
}
@ -33,22 +32,22 @@ public class PeertubeInstance {
public String getUrl() {
return url;
}
public void fetchInstanceMetaData() throws Exception {
Downloader downloader = NewPipe.getDownloader();
Response response = null;
try {
response = downloader.get(url + "/api/v1/config");
} catch (ReCaptchaException | IOException e) {
throw new Exception("unable to configure instance " + url, e);
}
if(null == response || StringUtil.isBlank(response.responseBody())) {
if (response == null || StringUtil.isBlank(response.responseBody())) {
throw new Exception("unable to configure instance " + url);
}
try {
try {
JsonObject json = JsonParser.object().from(response.responseBody());
this.name = JsonUtils.getString(json, "instance.name");
} catch (JsonParserException | ParsingException e) {
@ -59,5 +58,5 @@ public class PeertubeInstance {
public String getName() {
return name;
}
}

View File

@ -1,32 +1,34 @@
package org.schabi.newpipe.extractor.services.peertube;
import com.grack.nanojson.JsonObject;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import com.grack.nanojson.JsonObject;
import java.util.TimeZone;
public class PeertubeParsingHelper {
private PeertubeParsingHelper() {
}
public static void validate(JsonObject json) throws ContentNotAvailableException {
String error = json.getString("error");
if(!StringUtil.isBlank(error)) {
if (!StringUtil.isBlank(error)) {
throw new ContentNotAvailableException(error);
}
}
public static Calendar parseDateFrom(String textualUploadDate) throws ParsingException {
Date date;
try {
date = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.S'Z'").parse(textualUploadDate);
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.S'Z'");
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
date = sdf.parse(textualUploadDate);
} catch (ParseException e) {
throw new ParsingException("Could not parse date: \"" + textualUploadDate + "\"", e);
}
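
As a quick illustration of the GMT change above, a small sketch; the timestamp string is invented and only meant to match the pattern used by the helper:

```java
import java.util.Calendar;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper;

// With the time zone pinned to GMT, this (made-up) timestamp parses to the
// same instant on every machine, independent of the default time zone.
final class PeertubeDateSketch {
    static Calendar exampleUploadDate() throws ParsingException {
        return PeertubeParsingHelper.parseDateFrom("2020-03-17T18:03:14.000Z");
    }
}
```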

View File

@ -1,49 +1,35 @@
package org.schabi.newpipe.extractor.services.peertube;
import static java.util.Arrays.asList;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.COMMENTS;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.VIDEO;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskList;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.*;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeChannelExtractor;
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeCommentsExtractor;
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeSearchExtractor;
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeStreamExtractor;
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeSuggestionExtractor;
import org.schabi.newpipe.extractor.services.peertube.extractors.PeertubeTrendingExtractor;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeChannelLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeCommentsLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeSearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeStreamLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeTrendingLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.peertube.extractors.*;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.*;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;
import static java.util.Arrays.asList;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.COMMENTS;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.VIDEO;
public class PeertubeService extends StreamingService {
private PeertubeInstance instance;
public PeertubeService(int id) {
this(id, PeertubeInstance.defaultInstance);
}
public PeertubeService(int id, PeertubeInstance instance) {
super(id, "PeerTube", asList(VIDEO, COMMENTS));
this.instance = instance;
this.instance = instance;
}
@Override
@ -117,15 +103,15 @@ public class PeertubeService extends StreamingService {
public String getBaseUrl() {
return instance.getUrl();
}
public PeertubeInstance getInstance() {
return this.instance;
}
public void setInstance(PeertubeInstance instance) {
this.instance = instance;
}
@Override
public KioskList getKioskList() throws ExtractionException {
KioskList.KioskExtractorFactory kioskFactory = new KioskList.KioskExtractorFactory() {
@ -155,6 +141,6 @@ public class PeertubeService extends StreamingService {
return list;
}
}
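
A short usage sketch of the `getInstance`/`setInstance` plumbing above; the instance URL is a placeholder, and it is assumed here that `ServiceList.PeerTube` is typed as `PeertubeService`:

```java
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.services.peertube.PeertubeInstance;

// Sketch: point the PeerTube service at another (placeholder) instance.
// fetchInstanceMetaData() resolves the instance name from <url>/api/v1/config.
final class PeertubeInstanceSketch {
    static void switchInstance() throws Exception {
        final PeertubeInstance instance =
                new PeertubeInstance("https://peertube.example", "Example");
        instance.fetchInstanceMetaData();
        ServiceList.PeerTube.setInstance(instance);
    }
}
```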

View File

@ -1,7 +1,9 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
@ -17,21 +19,18 @@ import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Parser.RegexException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import java.io.IOException;
public class PeertubeChannelExtractor extends ChannelExtractor {
private static final String START_KEY = "start";
private static final String COUNT_KEY = "count";
private static final int ITEMS_PER_PAGE = 12;
private static final String START_PATTERN = "start=(\\d*)";
private InfoItemsPage<StreamInfoItem> initPage;
private long total;
private JsonObject json;
private final String baseUrl;
@ -45,7 +44,7 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
String value;
try {
value = JsonUtils.getString(json, "avatar.path");
}catch(Exception e) {
} catch (Exception e) {
value = "/client/assets/images/default-avatar.png";
}
return baseUrl + value;
@ -71,7 +70,7 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
public String getDescription() throws ParsingException {
try {
return JsonUtils.getString(json, "description");
}catch(ParsingException e) {
} catch (ParsingException e) {
return "No description";
}
}
@ -86,18 +85,18 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
}catch(Exception e) {
} catch (Exception e) {
throw new ParsingException("unable to extract channel streams", e);
}
for(Object c: contents) {
if(c instanceof JsonObject) {
for (Object c : contents) {
if (c instanceof JsonObject) {
final JsonObject item = (JsonObject) c;
PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl);
collector.commit(extractor);
}
}
}
@Override
@ -110,26 +109,26 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
Response response = getDownloader().get(pageUrl);
JsonObject json = null;
if(null != response && !StringUtil.isBlank(response.responseBody())) {
if (null != response && !StringUtil.isBlank(response.responseBody())) {
try {
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for kiosk info", e);
}
}
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
if(json != null) {
if (json != null) {
PeertubeParsingHelper.validate(json);
Number number = JsonUtils.getNumber(json, "total");
if(number != null) this.total = number.longValue();
if (number != null) this.total = number.longValue();
collectStreamsFrom(collector, json, pageUrl);
} else {
throw new ExtractionException("Unable to get peertube kiosk info");
throw new ExtractionException("Unable to get PeerTube kiosk info");
}
return new InfoItemsPage<>(collector, getNextPageUrl(pageUrl));
}
private String getNextPageUrl(String prevPageUrl) {
String prevStart;
@ -138,30 +137,30 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
} catch (RegexException e) {
return "";
}
if(StringUtil.isBlank(prevStart)) return "";
if (StringUtil.isBlank(prevStart)) return "";
long nextStart = 0;
try {
nextStart = Long.valueOf(prevStart) + ITEMS_PER_PAGE;
} catch (NumberFormatException e) {
return "";
}
if(nextStart >= total) {
if (nextStart >= total) {
return "";
}else {
} else {
return prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + String.valueOf(nextStart));
}
}
@Override
public void onFetchPage(Downloader downloader) throws IOException, ExtractionException {
Response response = downloader.get(getUrl());
if(null != response && null != response.responseBody()) {
if (null != response && null != response.responseBody()) {
setInitialData(response.responseBody());
}else {
throw new ExtractionException("Unable to extract peertube channel data");
} else {
throw new ExtractionException("Unable to extract PeerTube channel data");
}
String pageUrl = getUrl() + "/videos?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
this.initPage = getPage(pageUrl);
}
@ -172,14 +171,14 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
} catch (JsonParserException e) {
throw new ExtractionException("Unable to extract peertube channel data", e);
}
if(null == json) throw new ExtractionException("Unable to extract peertube channel data");
if (json == null) throw new ExtractionException("Unable to extract PeerTube channel data");
}
@Override
public String getName() throws ParsingException {
return JsonUtils.getString(json, "displayName");
}
@Override
public String getOriginalUrl() throws ParsingException {
return baseUrl + "/accounts/" + getId();

View File

@ -1,7 +1,8 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
@ -16,9 +17,7 @@ import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Parser.RegexException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import java.io.IOException;
public class PeertubeCommentsExtractor extends CommentsExtractor {
@ -26,10 +25,10 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
private static final String COUNT_KEY = "count";
private static final int ITEMS_PER_PAGE = 12;
private static final String START_PATTERN = "start=(\\d*)";
private InfoItemsPage<CommentsInfoItem> initPage;
private long total;
public PeertubeCommentsExtractor(StreamingService service, ListLinkHandler uiHandler) {
super(service, uiHandler);
}
@ -38,7 +37,7 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
public String getName() throws ParsingException {
return "Comments";
}
@Override
public InfoItemsPage<CommentsInfoItem> getInitialPage() throws IOException, ExtractionException {
super.fetchPage();
@ -49,18 +48,18 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
}catch(Exception e) {
} catch (Exception e) {
throw new ParsingException("unable to extract comments info", e);
}
for(Object c: contents) {
if(c instanceof JsonObject) {
for (Object c : contents) {
if (c instanceof JsonObject) {
final JsonObject item = (JsonObject) c;
PeertubeCommentsInfoItemExtractor extractor = new PeertubeCommentsInfoItemExtractor(item, this);
collector.commit(extractor);
}
}
}
@Override
@ -73,18 +72,18 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
public InfoItemsPage<CommentsInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
Response response = getDownloader().get(pageUrl);
JsonObject json = null;
if(null != response && !StringUtil.isBlank(response.responseBody())) {
if (null != response && !StringUtil.isBlank(response.responseBody())) {
try {
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for comments info", e);
}
}
CommentsInfoItemsCollector collector = new CommentsInfoItemsCollector(getServiceId());
if(json != null) {
if (json != null) {
Number number = JsonUtils.getNumber(json, "total");
if(number != null) this.total = number.longValue();
if (number != null) this.total = number.longValue();
collectStreamsFrom(collector, json, pageUrl);
} else {
throw new ExtractionException("Unable to get peertube comments info");
@ -97,7 +96,7 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
String pageUrl = getUrl() + "?" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
this.initPage = getPage(pageUrl);
}
private String getNextPageUrl(String prevPageUrl) {
String prevStart;
try {
@ -105,17 +104,17 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
} catch (RegexException e) {
return "";
}
if(StringUtil.isBlank(prevStart)) return "";
if (StringUtil.isBlank(prevStart)) return "";
long nextStart = 0;
try {
nextStart = Long.valueOf(prevStart) + ITEMS_PER_PAGE;
} catch (NumberFormatException e) {
return "";
}
if(nextStart >= total) {
if (nextStart >= total) {
return "";
}else {
} else {
return prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + String.valueOf(nextStart));
}
}

View File

@ -1,5 +1,6 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import com.grack.nanojson.JsonObject;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.schabi.newpipe.extractor.ServiceList;
@ -9,8 +10,6 @@ import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import com.grack.nanojson.JsonObject;
public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtractor {
@ -34,7 +33,7 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac
String value;
try {
value = JsonUtils.getString(item, "account.avatar.path");
}catch(Exception e) {
} catch (Exception e) {
value = "/client/assets/images/default-avatar.png";
}
return baseUrl + value;
@ -49,13 +48,13 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac
public String getTextualPublishedTime() throws ParsingException {
return JsonUtils.getString(item, "createdAt");
}
@Override
public DateWrapper getPublishedTime() throws ParsingException {
String textualUploadDate = getTextualPublishedTime();
return new DateWrapper(PeertubeParsingHelper.parseDateFrom(textualUploadDate));
}
@Override
public int getLikeCount() throws ParsingException {
return -1;
@ -67,7 +66,7 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac
try {
Document doc = Jsoup.parse(htmlText);
return doc.body().text();
}catch(Exception e) {
} catch (Exception e) {
return htmlText.replaceAll("(?s)<[^>]*>(\\s*<[^>]*>)*", "");
}
}
@ -83,7 +82,7 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac
String value;
try {
value = JsonUtils.getString(item, "account.avatar.path");
}catch(Exception e) {
} catch (Exception e) {
value = "/client/assets/images/default-avatar.png";
}
return baseUrl + value;
@ -91,7 +90,7 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac
@Override
public String getAuthorName() throws ParsingException {
return JsonUtils.getString(item, "account.displayName");
return JsonUtils.getString(item, "account.name") + "@" + JsonUtils.getString(item, "account.host");
}
@Override
@ -100,5 +99,5 @@ public class PeertubeCommentsInfoItemExtractor implements CommentsInfoItemExtrac
String host = JsonUtils.getString(item, "account.host");
return ServiceList.PeerTube.getChannelLHFactory().fromId(name + "@" + host, baseUrl).getUrl();
}
}

View File

@ -1,7 +1,5 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -10,7 +8,9 @@ import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
public class PeertubePlaylistExtractor extends PlaylistExtractor{
import java.io.IOException;
public class PeertubePlaylistExtractor extends PlaylistExtractor {
public PeertubePlaylistExtractor(StreamingService service, ListLinkHandler linkHandler) {
super(service, linkHandler);
@ -73,8 +73,7 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor{
@Override
public void onFetchPage(Downloader downloader) throws IOException, ExtractionException {
// TODO Auto-generated method stub
}
@Override

View File

@ -1,7 +1,8 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemExtractor;
@ -18,9 +19,7 @@ import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Parser.RegexException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import java.io.IOException;
public class PeertubeSearchExtractor extends SearchExtractor {
@ -28,10 +27,10 @@ public class PeertubeSearchExtractor extends SearchExtractor {
private static final String COUNT_KEY = "count";
private static final int ITEMS_PER_PAGE = 12;
private static final String START_PATTERN = "start=(\\d*)";
private InfoItemsPage<InfoItem> initPage;
private long total;
public PeertubeSearchExtractor(StreamingService service, SearchQueryHandler linkHandler) {
super(service, linkHandler);
}
@ -40,7 +39,7 @@ public class PeertubeSearchExtractor extends SearchExtractor {
public String getSearchSuggestion() throws ParsingException {
return null;
}
@Override
public InfoItemsPage<InfoItem> getInitialPage() throws IOException, ExtractionException {
super.fetchPage();
@ -48,27 +47,27 @@ public class PeertubeSearchExtractor extends SearchExtractor {
}
private InfoItemsCollector<InfoItem, InfoItemExtractor> collectStreamsFrom(JsonObject json) throws ParsingException {
final InfoItemsSearchCollector collector = getInfoItemSearchCollector();
JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
}catch(Exception e) {
} catch (Exception e) {
throw new ParsingException("unable to extract search info", e);
}
String baseUrl = getBaseUrl();
for(Object c: contents) {
if(c instanceof JsonObject) {
for (Object c : contents) {
if (c instanceof JsonObject) {
final JsonObject item = (JsonObject) c;
PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl);
collector.commit(extractor);
}
}
return collector;
}
@Override
@ -81,17 +80,17 @@ public class PeertubeSearchExtractor extends SearchExtractor {
public InfoItemsPage<InfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
Response response = getDownloader().get(pageUrl);
JsonObject json = null;
if(null != response && !StringUtil.isBlank(response.responseBody())) {
if (null != response && !StringUtil.isBlank(response.responseBody())) {
try {
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for search info", e);
}
}
if(json != null) {
if (json != null) {
Number number = JsonUtils.getNumber(json, "total");
if(number != null) this.total = number.longValue();
if (number != null) this.total = number.longValue();
return new InfoItemsPage<>(collectStreamsFrom(json), getNextPageUrl(pageUrl));
} else {
throw new ExtractionException("Unable to get peertube search info");
@ -103,7 +102,7 @@ public class PeertubeSearchExtractor extends SearchExtractor {
String pageUrl = getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
this.initPage = getPage(pageUrl);
}
private String getNextPageUrl(String prevPageUrl) {
String prevStart;
try {
@ -111,20 +110,20 @@ public class PeertubeSearchExtractor extends SearchExtractor {
} catch (RegexException e) {
return "";
}
if(StringUtil.isBlank(prevStart)) return "";
if (StringUtil.isBlank(prevStart)) return "";
long nextStart = 0;
try {
nextStart = Long.valueOf(prevStart) + ITEMS_PER_PAGE;
} catch (NumberFormatException e) {
return "";
}
if(nextStart >= total) {
if (nextStart >= total) {
return "";
}else {
} else {
return prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + String.valueOf(nextStart));
}
}
}
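The hunks above also touch the start/count paging logic that PeerTube search (and, further down, trending) relies on. As a standalone illustration of that scheme, here is a minimal sketch in plain Java; the class and method names are invented for the example, while `ITEMS_PER_PAGE` and the `start=`/`count=` parameters are the values visible in the diff.

```java
class PeertubePagingSketch {
    private static final int ITEMS_PER_PAGE = 12; // matches the extractor above

    // Next-page URL for PeerTube's start/count paging, or "" once the next offset
    // would reach past the server-reported total.
    static String nextPageUrl(String prevPageUrl, long prevStart, long total) {
        long nextStart = prevStart + ITEMS_PER_PAGE;
        if (nextStart >= total) {
            return "";
        }
        return prevPageUrl.replace("start=" + prevStart, "start=" + nextStart);
    }

    public static void main(String[] args) {
        // With 40 total results: ...start=0... -> ...start=12...
        System.out.println(nextPageUrl(
                "https://peertube.example/api/v1/search/videos?search=cat&start=0&count=12",
                0, 40));
    }
}
```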

View File

@ -1,15 +1,12 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.MediaFormat;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
@ -20,34 +17,30 @@ import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper;
import org.schabi.newpipe.extractor.services.peertube.linkHandler.PeertubeSearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.stream.AudioStream;
import org.schabi.newpipe.extractor.stream.Stream;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.stream.StreamType;
import org.schabi.newpipe.extractor.stream.SubtitlesStream;
import org.schabi.newpipe.extractor.stream.VideoStream;
import org.schabi.newpipe.extractor.stream.*;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Utils;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
public class PeertubeStreamExtractor extends StreamExtractor {
private final String baseUrl;
private JsonObject json;
private List<SubtitlesStream> subtitles = new ArrayList<>();
private final String baseUrl;
public PeertubeStreamExtractor(StreamingService service, LinkHandler linkHandler) throws ParsingException {
super(service, linkHandler);
this.baseUrl = getBaseUrl();
}
@Override
public String getTextualUploadDate() throws ParsingException {
return JsonUtils.getString(json, "publishedAt");
@ -63,24 +56,42 @@ public class PeertubeStreamExtractor extends StreamExtractor {
return new DateWrapper(PeertubeParsingHelper.parseDateFrom(textualUploadDate));
}
@Override
public String getThumbnailUrl() throws ParsingException {
return baseUrl + JsonUtils.getString(json, "thumbnailPath");
return baseUrl + JsonUtils.getString(json, "previewPath");
}
@Override
public String getDescription() throws ParsingException {
public Description getDescription() throws ParsingException {
String text;
try {
return JsonUtils.getString(json, "description");
}catch(ParsingException e) {
return "No description";
text = JsonUtils.getString(json, "description");
} catch (ParsingException e) {
return Description.emptyDescription;
}
if (text.length() == 250 && text.substring(247).equals("...")) {
//if description is shortened, get full description
Downloader dl = NewPipe.getDownloader();
try {
Response response = dl.get(getUrl() + "/description");
JsonObject jsonObject = JsonParser.object().from(response.responseBody());
text = JsonUtils.getString(jsonObject, "description");
} catch (ReCaptchaException | IOException | JsonParserException e) {
e.printStackTrace();
}
}
return new Description(text, Description.MARKDOWN);
}
@Override
public int getAgeLimit() throws ParsingException {
return NO_AGE_LIMIT;
boolean isNSFW = JsonUtils.getBoolean(json, "nsfw");
if (isNSFW) {
return 18;
} else {
return NO_AGE_LIMIT;
}
}
@Override
@ -130,7 +141,7 @@ public class PeertubeStreamExtractor extends StreamExtractor {
String value;
try {
value = JsonUtils.getString(json, "account.avatar.path");
}catch(Exception e) {
} catch (Exception e) {
value = "/client/assets/images/default-avatar.png";
}
return baseUrl + value;
@ -157,8 +168,8 @@ public class PeertubeStreamExtractor extends StreamExtractor {
List<VideoStream> videoStreams = new ArrayList<>();
try {
JsonArray streams = json.getArray("files", new JsonArray());
for(Object s: streams) {
if(!(s instanceof JsonObject)) continue;
for (Object s : streams) {
if (!(s instanceof JsonObject)) continue;
JsonObject stream = (JsonObject) s;
String url = JsonUtils.getString(stream, "fileUrl");
String torrentUrl = JsonUtils.getString(stream, "torrentUrl");
@ -192,8 +203,8 @@ public class PeertubeStreamExtractor extends StreamExtractor {
@Override
public List<SubtitlesStream> getSubtitles(final MediaFormat format) throws IOException, ExtractionException {
List<SubtitlesStream> filteredSubs = new ArrayList<>();
for(SubtitlesStream sub: subtitles) {
if(sub.getFormat() == format) {
for (SubtitlesStream sub : subtitles) {
if (sub.getFormat() == format) {
filteredSubs.add(sub);
}
}
@ -215,29 +226,40 @@ public class PeertubeStreamExtractor extends StreamExtractor {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
List<String> tags = getTags();
String apiUrl = null;
if(!tags.isEmpty()) {
if (!tags.isEmpty()) {
apiUrl = getRelatedStreamsUrl(tags);
}else {
} else {
apiUrl = getUploaderUrl() + "/videos?start=0&count=8";
}
if(!StringUtil.isBlank(apiUrl)) getStreamsFromApi(collector, apiUrl);
if (!StringUtil.isBlank(apiUrl)) getStreamsFromApi(collector, apiUrl);
return collector;
}
private List<String> getTags(){
@Override
public List<String> getTags() {
try {
return (List) JsonUtils.getArray(json, "tags");
} catch (Exception e) {
return Collections.emptyList();
}
}
@Nonnull
@Override
public String getSupportInfo() throws ParsingException {
try {
return JsonUtils.getString(json, "support");
} catch (ParsingException e) {
return "";
}
}
private String getRelatedStreamsUrl(List<String> tags) throws UnsupportedEncodingException {
String url = baseUrl + PeertubeSearchQueryHandlerFactory.SEARCH_ENDPOINT;
StringBuilder params = new StringBuilder();
params.append("start=0&count=8&sort=-createdAt");
for(String tag : tags) {
for (String tag : tags) {
params.append("&tagsOneOf=");
params.append(URLEncoder.encode(tag, "UTF-8"));
}
@ -247,38 +269,38 @@ public class PeertubeStreamExtractor extends StreamExtractor {
private void getStreamsFromApi(StreamInfoItemsCollector collector, String apiUrl) throws ReCaptchaException, IOException, ParsingException {
Response response = getDownloader().get(apiUrl);
JsonObject relatedVideosJson = null;
if(null != response && !StringUtil.isBlank(response.responseBody())) {
if (null != response && !StringUtil.isBlank(response.responseBody())) {
try {
relatedVideosJson = JsonParser.object().from(response.responseBody());
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json data for related videos", e);
}
}
if(relatedVideosJson != null) {
if (relatedVideosJson != null) {
collectStreamsFrom(collector, relatedVideosJson);
}
}
private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonObject json) throws ParsingException {
JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
}catch(Exception e) {
} catch (Exception e) {
throw new ParsingException("unable to extract related videos", e);
}
for(Object c: contents) {
if(c instanceof JsonObject) {
for (Object c : contents) {
if (c instanceof JsonObject) {
final JsonObject item = (JsonObject) c;
PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl);
//do not add the same stream in related streams
if(!extractor.getUrl().equals(getUrl())) collector.commit(extractor);
if (!extractor.getUrl().equals(getUrl())) collector.commit(extractor);
}
}
}
@Override
public String getErrorMessage() {
@ -288,12 +310,12 @@ public class PeertubeStreamExtractor extends StreamExtractor {
@Override
public void onFetchPage(Downloader downloader) throws IOException, ExtractionException {
Response response = downloader.get(getUrl());
if(null != response && null != response.responseBody()) {
if (null != response && null != response.responseBody()) {
setInitialData(response.responseBody());
}else {
} else {
throw new ExtractionException("Unable to extract peertube channel data");
}
loadSubtitles();
}
@ -303,24 +325,25 @@ public class PeertubeStreamExtractor extends StreamExtractor {
} catch (JsonParserException e) {
throw new ExtractionException("Unable to extract peertube stream data", e);
}
if(null == json) throw new ExtractionException("Unable to extract peertube stream data");
if (null == json) throw new ExtractionException("Unable to extract peertube stream data");
PeertubeParsingHelper.validate(json);
}
private void loadSubtitles() {
if (subtitles.isEmpty()) {
try {
Response response = getDownloader().get(getUrl() + "/captions");
Response response = getDownloader().get(getUrl() + "/captions");
JsonObject captionsJson = JsonParser.object().from(response.responseBody());
JsonArray captions = JsonUtils.getArray(captionsJson, "data");
for(Object c: captions) {
if(c instanceof JsonObject) {
JsonObject caption = (JsonObject)c;
for (Object c : captions) {
if (c instanceof JsonObject) {
JsonObject caption = (JsonObject) c;
String url = baseUrl + JsonUtils.getString(caption, "captionPath");
String languageCode = JsonUtils.getString(caption, "language.id");
String ext = url.substring(url.lastIndexOf(".") + 1);
MediaFormat fmt = MediaFormat.getFromSuffix(ext);
if(fmt != null && languageCode != null) subtitles.add(new SubtitlesStream(fmt, languageCode, url, false));
if (fmt != null && languageCode != null)
subtitles.add(new SubtitlesStream(fmt, languageCode, url, false));
}
}
} catch (Exception e) {
@ -339,4 +362,32 @@ public class PeertubeStreamExtractor extends StreamExtractor {
return baseUrl + "/videos/watch/" + getId();
}
@Override
public String getHost() throws ParsingException {
return JsonUtils.getString(json, "account.host");
}
@Override
public String getPrivacy() throws ParsingException {
return JsonUtils.getString(json, "privacy.label");
}
@Override
public String getCategory() throws ParsingException {
return JsonUtils.getString(json, "category.label");
}
@Override
public String getLicence() throws ParsingException {
return JsonUtils.getString(json, "licence.label");
}
@Override
public Locale getLanguageInfo() throws ParsingException {
try {
return new Locale(JsonUtils.getString(json, "language.id"));
} catch (ParsingException e) {
return null;
}
}
}
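Two behavioural changes in PeertubeStreamExtractor are easy to miss among the whitespace hunks: `getDescription()` now treats a 250-character description ending in "..." as shortened and re-fetches the full text from `<video url>/description`, and `getAgeLimit()` maps the `nsfw` flag to 18. The sketch below isolates just those two decisions; it is illustrative only, and the assumption that `NO_AGE_LIMIT` equals 0 is noted in the comment.

```java
class PeertubeStreamSketch {
    // The video JSON carries a description trimmed to 250 characters ending in "...";
    // the extractor above re-fetches "<video url>/description" when this is detected.
    static boolean isTruncatedDescription(String description) {
        return description != null && description.length() == 250 && description.endsWith("...");
    }

    // Mirrors getAgeLimit(): nsfw videos are reported as 18+, everything else as
    // unrestricted (0 stands in for StreamExtractor.NO_AGE_LIMIT here, an assumption).
    static int ageLimit(boolean nsfw) {
        return nsfw ? 18 : 0;
    }
}
```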

View File

@ -1,5 +1,6 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.localization.DateWrapper;
@ -8,58 +9,56 @@ import org.schabi.newpipe.extractor.stream.StreamInfoItemExtractor;
import org.schabi.newpipe.extractor.stream.StreamType;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import com.grack.nanojson.JsonObject;
public class PeertubeStreamInfoItemExtractor implements StreamInfoItemExtractor {
protected final JsonObject item;
private final String baseUrl;
public PeertubeStreamInfoItemExtractor(JsonObject item, String baseUrl) {
this.item = item;
this.baseUrl = baseUrl;
}
@Override
public String getUrl() throws ParsingException {
String uuid = JsonUtils.getString(item, "uuid");
return ServiceList.PeerTube.getStreamLHFactory().fromId(uuid, baseUrl).getUrl();
}
@Override
public String getThumbnailUrl() throws ParsingException {
String value = JsonUtils.getString(item, "thumbnailPath");
return baseUrl + value;
}
@Override
public String getName() throws ParsingException {
return JsonUtils.getString(item, "name");
}
@Override
public boolean isAd() throws ParsingException {
return false;
}
@Override
public long getViewCount() throws ParsingException {
Number value = JsonUtils.getNumber(item, "views");
return value.longValue();
}
@Override
public String getUploaderUrl() throws ParsingException {
String name = JsonUtils.getString(item, "account.name");
String host = JsonUtils.getString(item, "account.host");
return ServiceList.PeerTube.getChannelLHFactory().fromId(name + "@" + host, baseUrl).getUrl();
}
@Override
public String getUploaderName() throws ParsingException {
return JsonUtils.getString(item, "account.displayName");
}
@Override
public String getTextualUploadDate() throws ParsingException {
return JsonUtils.getString(item, "publishedAt");
@ -75,12 +74,12 @@ public class PeertubeStreamInfoItemExtractor implements StreamInfoItemExtractor
return new DateWrapper(PeertubeParsingHelper.parseDateFrom(textualUploadDate));
}
@Override
public StreamType getStreamType() throws ParsingException {
return StreamType.VIDEO_STREAM;
}
@Override
public long getDuration() throws ParsingException {
Number value = JsonUtils.getNumber(item, "duration");

View File

@ -1,10 +1,10 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.util.List;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import java.util.List;
public class PeertubeSubscriptionExtractor extends SubscriptionExtractor {
public PeertubeSubscriptionExtractor(StreamingService service, List<ContentSource> supportedSources) {

View File

@ -1,14 +1,14 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;
public class PeertubeSuggestionExtractor extends SuggestionExtractor{
import java.io.IOException;
import java.util.Collections;
import java.util.List;
public class PeertubeSuggestionExtractor extends SuggestionExtractor {
public PeertubeSuggestionExtractor(StreamingService service) {
super(service);

View File

@ -1,7 +1,8 @@
package org.schabi.newpipe.extractor.services.peertube.extractors;
import java.io.IOException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.jsoup.helper.StringUtil;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
@ -16,17 +17,15 @@ import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Parser.RegexException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import java.io.IOException;
public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
private static final String START_KEY = "start";
private static final String COUNT_KEY = "count";
private static final int ITEMS_PER_PAGE = 12;
private static final String START_PATTERN = "start=(\\d*)";
private InfoItemsPage<StreamInfoItem> initPage;
private long total;
@ -49,19 +48,19 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
JsonArray contents;
try {
contents = (JsonArray) JsonUtils.getValue(json, "data");
}catch(Exception e) {
throw new ParsingException("unable to extract kiosk info", e);
} catch (Exception e) {
throw new ParsingException("Unable to extract kiosk info", e);
}
String baseUrl = getBaseUrl();
for(Object c: contents) {
if(c instanceof JsonObject) {
for (Object c : contents) {
if (c instanceof JsonObject) {
final JsonObject item = (JsonObject) c;
PeertubeStreamInfoItemExtractor extractor = new PeertubeStreamInfoItemExtractor(item, baseUrl);
collector.commit(extractor);
}
}
}
@Override
@ -74,18 +73,18 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
Response response = getDownloader().get(pageUrl);
JsonObject json = null;
if(null != response && !StringUtil.isBlank(response.responseBody())) {
if (null != response && !StringUtil.isBlank(response.responseBody())) {
try {
json = JsonParser.object().from(response.responseBody());
} catch (Exception e) {
throw new ParsingException("Could not parse json data for kiosk info", e);
}
}
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
if(json != null) {
if (json != null) {
Number number = JsonUtils.getNumber(json, "total");
if(number != null) this.total = number.longValue();
if (number != null) this.total = number.longValue();
collectStreamsFrom(collector, json, pageUrl);
} else {
throw new ExtractionException("Unable to get peertube kiosk info");
@ -98,7 +97,7 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
String pageUrl = getUrl() + "&" + START_KEY + "=0&" + COUNT_KEY + "=" + ITEMS_PER_PAGE;
this.initPage = getPage(pageUrl);
}
private String getNextPageUrl(String prevPageUrl) {
String prevStart;
try {
@ -106,17 +105,17 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
} catch (RegexException e) {
return "";
}
if(StringUtil.isBlank(prevStart)) return "";
if (StringUtil.isBlank(prevStart)) return "";
long nextStart = 0;
try {
nextStart = Long.valueOf(prevStart) + ITEMS_PER_PAGE;
} catch (NumberFormatException e) {
return "";
}
if(nextStart >= total) {
if (nextStart >= total) {
return "";
}else {
} else {
return prevPageUrl.replace(START_KEY + "=" + prevStart, START_KEY + "=" + String.valueOf(nextStart));
}
}

View File

@ -1,12 +1,12 @@
package org.schabi.newpipe.extractor.services.peertube.linkHandler;
import java.util.List;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Parser;
import java.util.List;
public class PeertubeChannelLinkHandlerFactory extends ListLinkHandlerFactory {
private static final PeertubeChannelLinkHandlerFactory instance = new PeertubeChannelLinkHandlerFactory();
@ -27,7 +27,7 @@ public class PeertubeChannelLinkHandlerFactory extends ListLinkHandlerFactory {
String baseUrl = ServiceList.PeerTube.getBaseUrl();
return getUrl(id, contentFilters, searchFilter, baseUrl);
}
@Override
public String getUrl(String id, List<String> contentFilter, String sortFilter, String baseUrl)
throws ParsingException {

View File

@ -1,13 +1,13 @@
package org.schabi.newpipe.extractor.services.peertube.linkHandler;
import java.util.List;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.FoundAdException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Parser;
import java.util.List;
public class PeertubeCommentsLinkHandlerFactory extends ListLinkHandlerFactory {
private static final PeertubeCommentsLinkHandlerFactory instance = new PeertubeCommentsLinkHandlerFactory();
@ -33,10 +33,10 @@ public class PeertubeCommentsLinkHandlerFactory extends ListLinkHandlerFactory {
String baseUrl = ServiceList.PeerTube.getBaseUrl();
return getUrl(id, contentFilter, sortFilter, baseUrl);
}
@Override
public String getUrl(String id, List<String> contentFilter, String sortFilter, String baseUrl) throws ParsingException {
return baseUrl + String.format(COMMENTS_ENDPOINT, id);
}
}

View File

@ -1,13 +1,13 @@
package org.schabi.newpipe.extractor.services.peertube.linkHandler;
import java.util.List;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Parser;
import java.util.List;
public class PeertubePlaylistLinkHandlerFactory extends ListLinkHandlerFactory {
private static final PeertubePlaylistLinkHandlerFactory instance = new PeertubePlaylistLinkHandlerFactory();
@ -23,12 +23,12 @@ public class PeertubePlaylistLinkHandlerFactory extends ListLinkHandlerFactory {
String baseUrl = ServiceList.PeerTube.getBaseUrl();
return getUrl(id, contentFilters, sortFilter, baseUrl);
}
@Override
public String getUrl(String id, List<String> contentFilters, String sortFilter, String baseUrl) {
return baseUrl + VIDEO_CHANNELS_ENDPOINT + id;
}
@Override
public String getId(String url) throws ParsingException {
return Parser.matchGroup1(ID_PATTERN, url);

View File

@ -1,13 +1,13 @@
package org.schabi.newpipe.extractor.services.peertube.linkHandler;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.List;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandlerFactory;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.List;
public class PeertubeSearchQueryHandlerFactory extends SearchQueryHandlerFactory {
public static final String CHARSET_UTF_8 = "UTF-8";
@ -21,9 +21,9 @@ public class PeertubeSearchQueryHandlerFactory extends SearchQueryHandlerFactory
@Override
public String getUrl(String searchString, List<String> contentFilters, String sortFilter) throws ParsingException {
String baseUrl = ServiceList.PeerTube.getBaseUrl();
return getUrl(searchString, contentFilters, sortFilter, baseUrl);
return getUrl(searchString, contentFilters, sortFilter, baseUrl);
}
@Override
public String getUrl(String searchString, List<String> contentFilters, String sortFilter, String baseUrl) throws ParsingException {
try {
@ -38,6 +38,6 @@ public class PeertubeSearchQueryHandlerFactory extends SearchQueryHandlerFactory
@Override
public String[] getAvailableContentFilter() {
return new String[] { VIDEOS };
return new String[]{VIDEOS};
}
}

View File

@ -24,7 +24,7 @@ public class PeertubeStreamLinkHandlerFactory extends LinkHandlerFactory {
String baseUrl = ServiceList.PeerTube.getBaseUrl();
return getUrl(id, baseUrl);
}
@Override
public String getUrl(String id, String baseUrl) {
return baseUrl + VIDEO_ENDPOINT + id;

View File

@ -1,26 +1,26 @@
package org.schabi.newpipe.extractor.services.peertube.linkHandler;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
public class PeertubeTrendingLinkHandlerFactory extends ListLinkHandlerFactory {
private static final PeertubeTrendingLinkHandlerFactory instance = new PeertubeTrendingLinkHandlerFactory();
public static final Map<String, String> KIOSK_MAP;
public static final Map<String, String> REVERSE_KIOSK_MAP;
public static final String KIOSK_TRENDING = "Trending";
public static final String KIOSK_MOST_LIKED = "Most liked";
public static final String KIOSK_RECENT = "Recently added";
public static final String KIOSK_LOCAL = "Local";
static {
Map<String, String> map = new HashMap<>();
map.put(KIOSK_TRENDING, "%s/api/v1/videos?sort=-trending");
@ -28,24 +28,24 @@ public class PeertubeTrendingLinkHandlerFactory extends ListLinkHandlerFactory {
map.put(KIOSK_RECENT, "%s/api/v1/videos?sort=-publishedAt");
map.put(KIOSK_LOCAL, "%s/api/v1/videos?sort=-publishedAt&filter=local");
KIOSK_MAP = Collections.unmodifiableMap(map);
Map<String, String> reverseMap = new HashMap<>();
for(Map.Entry<String, String> entry : KIOSK_MAP.entrySet()){
for (Map.Entry<String, String> entry : KIOSK_MAP.entrySet()) {
reverseMap.put(entry.getValue(), entry.getKey());
}
REVERSE_KIOSK_MAP = Collections.unmodifiableMap(reverseMap);
}
public static PeertubeTrendingLinkHandlerFactory getInstance() {
return instance;
}
@Override
public String getUrl(String id, List<String> contentFilters, String sortFilter) {
String baseUrl = ServiceList.PeerTube.getBaseUrl();
return getUrl(id, contentFilters, sortFilter, baseUrl);
}
@Override
public String getUrl(String id, List<String> contentFilters, String sortFilter, String baseUrl) {
return String.format(KIOSK_MAP.get(id), baseUrl);

View File

@ -4,9 +4,9 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
@ -86,7 +86,7 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
@Nonnull
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws ExtractionException {
if(streamInfoItemsCollector == null) {
if (streamInfoItemsCollector == null) {
computeNextPageAndGetStreams();
}
return new InfoItemsPage<>(streamInfoItemsCollector, getNextPageUrl());
@ -94,7 +94,7 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
@Override
public String getNextPageUrl() throws ExtractionException {
if(nextPageUrl == null) {
if (nextPageUrl == null) {
computeNextPageAndGetStreams();
}
return nextPageUrl;

View File

@ -1,7 +1,7 @@
package org.schabi.newpipe.extractor.services.soundcloud;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;

View File

@ -1,7 +1,7 @@
package org.schabi.newpipe.extractor.services.soundcloud;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
@ -12,8 +12,8 @@ import javax.annotation.Nonnull;
import java.io.IOException;
public class SoundcloudChartsExtractor extends KioskExtractor<StreamInfoItem> {
private StreamInfoItemsCollector collector = null;
private String nextPageUrl = null;
private StreamInfoItemsCollector collector = null;
private String nextPageUrl = null;
public SoundcloudChartsExtractor(StreamingService service,
ListLinkHandler linkHandler,
@ -68,7 +68,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor<StreamInfoItem> {
@Override
public String getNextPageUrl() throws IOException, ExtractionException {
if(nextPageUrl == null) {
if (nextPageUrl == null) {
computNextPageAndStreams();
}
return nextPageUrl;
@ -77,7 +77,7 @@ public class SoundcloudChartsExtractor extends KioskExtractor<StreamInfoItem> {
@Nonnull
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
if(collector == null) {
if (collector == null) {
computNextPageAndStreams();
}
return new InfoItemsPage<>(collector, getNextPageUrl());

View File

@ -31,9 +31,9 @@ import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
import static org.schabi.newpipe.extractor.utils.Utils.replaceHttpWithHttps;
public class SoundcloudParsingHelper {
private static final String HARDCODED_CLIENT_ID = "r5ELVSy3RkcjX7ilaL7n2v1Z8irA9SL8"; // Updated on 31/12/19
private static final String HARDCODED_CLIENT_ID = "t0h1jzYMsaZXy6ggnZO71gHK3Ms6CFwE"; // Updated on 14/03/20
private static String clientId;
private SoundcloudParsingHelper() {
}
@ -103,7 +103,7 @@ public class SoundcloudParsingHelper {
/**
* Call the endpoint "/resolve" of the api.<p>
*
* <p>
* See https://developers.soundcloud.com/docs/api/reference#resolve
*/
public static JsonObject resolveFor(Downloader downloader, String url) throws IOException, ExtractionException {

View File

@ -3,8 +3,8 @@ package org.schabi.newpipe.extractor.services.soundcloud;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;

View File

@ -1,7 +1,7 @@
package org.schabi.newpipe.extractor.services.soundcloud;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;

View File

@ -4,7 +4,10 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.*;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemExtractor;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;

View File

@ -25,7 +25,7 @@ public class SoundcloudSearchQueryHandlerFactory extends SearchQueryHandlerFacto
try {
String url = "https://api-v2.soundcloud.com/search";
if(contentFilter.size() > 0) {
if (contentFilter.size() > 0) {
switch (contentFilter.get(0)) {
case TRACKS:
url += "/tracks";
@ -58,7 +58,7 @@ public class SoundcloudSearchQueryHandlerFactory extends SearchQueryHandlerFacto
@Override
public String[] getAvailableContentFilter() {
return new String[] {
return new String[]{
ALL,
TRACKS,
USERS,

View File

@ -1,31 +1,26 @@
package org.schabi.newpipe.extractor.services.soundcloud;
import static java.util.Collections.singletonList;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.AUDIO;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskList;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.*;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import static java.util.Collections.singletonList;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.AUDIO;
public class SoundcloudService extends StreamingService {
public SoundcloudService(int id) {
super(id, "SoundCloud", singletonList(AUDIO));
}
@Override
public String getBaseUrl() {
return "https://soundcloud.com";
@ -110,15 +105,15 @@ public class SoundcloudService extends StreamingService {
return new SoundcloudSubscriptionExtractor(this);
}
@Override
public ListLinkHandlerFactory getCommentsLHFactory() {
return null;
}
@Override
public ListLinkHandlerFactory getCommentsLHFactory() {
return null;
}
@Override
@Override
public CommentsExtractor getCommentsExtractor(ListLinkHandler linkHandler)
throws ExtractionException {
return null;
}
}

View File

@ -4,7 +4,9 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.*;
import org.schabi.newpipe.extractor.MediaFormat;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
@ -20,6 +22,7 @@ import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
public class SoundcloudStreamExtractor extends StreamExtractor {
private JsonObject track;
@ -73,10 +76,9 @@ public class SoundcloudStreamExtractor extends StreamExtractor {
return artworkUrlBetterResolution;
}
@Nonnull
@Override
public String getDescription() {
return track.getString("description");
public Description getDescription() {
return new Description(track.getString("description"), Description.PLAIN_TEXT);
}
@Override
@ -183,7 +185,7 @@ public class SoundcloudStreamExtractor extends StreamExtractor {
try {
JsonObject mp3UrlObject = JsonParser.object().from(res);
// Links in this file are also only valid for a short period.
audioStreams.add(new AudioStream(mp3UrlObject.getString("url"),
audioStreams.add(new AudioStream(mp3UrlObject.getString("url"),
MediaFormat.MP3, 128));
} catch (JsonParserException e) {
throw new ParsingException("Could not parse streamable url", e);
@ -254,4 +256,41 @@ public class SoundcloudStreamExtractor extends StreamExtractor {
public String getErrorMessage() {
return null;
}
@Override
public String getHost() throws ParsingException {
return "";
}
@Override
public String getPrivacy() throws ParsingException {
return "";
}
@Override
public String getCategory() throws ParsingException {
return "";
}
@Override
public String getLicence() throws ParsingException {
return "";
}
@Override
public Locale getLanguageInfo() throws ParsingException {
return null;
}
@Nonnull
@Override
public List<String> getTags() throws ParsingException {
return new ArrayList<>();
}
@Nonnull
@Override
public String getSupportInfo() throws ParsingException {
return "";
}
}
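Together with the PeerTube change further up, this commit moves `getDescription()` from a bare `String` to the `Description` value type, which carries a format marker so clients know how to render the text. Below is a minimal sketch of the constructions used in the diff; the import path is an assumption, while the constructor and the `PLAIN_TEXT`/`MARKDOWN`/`emptyDescription` members appear verbatim in the hunks above.

```java
import org.schabi.newpipe.extractor.stream.Description; // package assumed

class DescriptionSketch {
    // SoundCloud descriptions are plain text...
    static Description fromSoundcloud(String text) {
        return new Description(text, Description.PLAIN_TEXT);
    }

    // ...while PeerTube serves Markdown, so clients should render it as such.
    static Description fromPeertube(String markdown) {
        return new Description(markdown, Description.MARKDOWN);
    }

    // A missing description maps to the shared empty instance.
    static Description missing() {
        return Description.emptyDescription;
    }
}
```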

View File

@ -1,7 +1,7 @@
package org.schabi.newpipe.extractor.services.soundcloud;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;

View File

@ -4,9 +4,9 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;

View File

@ -1,13 +1,5 @@
package org.schabi.newpipe.extractor.services.youtube;
import static java.util.Arrays.asList;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.AUDIO;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.COMMENTS;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.LIVE;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.VIDEO;
import java.util.List;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
@ -15,28 +7,22 @@ import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.feed.FeedExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.kiosk.KioskList;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.*;
import org.schabi.newpipe.extractor.localization.ContentCountry;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.youtube.extractors.*;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeCommentsLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubePlaylistLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeSearchQueryHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeStreamLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeTrendingLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.*;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.subscription.SubscriptionExtractor;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;
import javax.annotation.Nonnull;
import java.util.List;
import static java.util.Arrays.asList;
import static org.schabi.newpipe.extractor.StreamingService.ServiceInfo.MediaCapability.*;
/*
* Created by Christian Schabesberger on 23.08.15.
@ -125,7 +111,7 @@ public class YoutubeService extends StreamingService {
public KioskExtractor createNewKiosk(StreamingService streamingService,
String url,
String id)
throws ExtractionException {
throws ExtractionException {
return new YoutubeTrendingExtractor(YoutubeService.this,
new YoutubeTrendingLinkHandlerFactory().fromUrl(url), id);
}

View File

@ -1,28 +1,29 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;
import javax.annotation.Nonnull;
import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.fixThumbnailUrl;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getJsonResponse;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
/*
* Created by Christian Schabesberger on 25.07.16.
*
@ -45,10 +46,8 @@ import java.io.IOException;
@SuppressWarnings("WeakerAccess")
public class YoutubeChannelExtractor extends ChannelExtractor {
/*package-private*/ static final String CHANNEL_URL_BASE = "https://www.youtube.com/channel/";
private static final String CHANNEL_URL_PARAMETERS = "/videos?view=0&flow=list&sort=dd&live_view=10000";
private Document doc;
private JsonObject initialData;
private JsonObject videoTab;
public YoutubeChannelExtractor(StreamingService service, ListLinkHandler linkHandler) {
super(service, linkHandler);
@ -56,21 +55,27 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
String channelUrl = super.getUrl() + CHANNEL_URL_PARAMETERS;
final Response response = downloader.get(channelUrl, getExtractorLocalization());
doc = YoutubeParsingHelper.parseAndCheckPage(channelUrl, response);
final String url = super.getUrl() + "/videos?pbj=1&view=0&flow=grid";
final JsonArray ajaxJson = getJsonResponse(url, getExtractorLocalization());
initialData = ajaxJson.getObject(1).getObject("response");
YoutubeParsingHelper.defaultAlertsCheck(initialData);
}
@Override
public String getNextPageUrl() throws ExtractionException {
return getNextPageUrlFrom(doc);
if (getVideoTab() == null) return "";
return getNextPageUrlFrom(getVideoTab().getObject("content").getObject("sectionListRenderer")
.getArray("contents").getObject(0).getObject("itemSectionRenderer")
.getArray("contents").getObject(0).getObject("gridRenderer").getArray("continuations"));
}
@Nonnull
@Override
public String getUrl() throws ParsingException {
try {
return CHANNEL_URL_BASE + getId();
return YoutubeChannelLinkHandlerFactory.getInstance().getUrl("channel/" + getId());
} catch (ParsingException e) {
return super.getUrl();
}
@ -80,15 +85,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public String getId() throws ParsingException {
try {
return doc.select("meta[itemprop=\"channelId\"]").first().attr("content");
} catch (Exception ignored) {}
// fallback method; does not work with channels that have no "Subscribe" button (e.g. EminemVEVO)
try {
Element element = doc.getElementsByClass("yt-uix-subscription-button").first();
if (element == null) element = doc.getElementsByClass("yt-uix-subscription-preferences-button").first();
return element.attr("data-channel-external-id");
return initialData.getObject("header").getObject("c4TabbedHeaderRenderer").getString("channelId");
} catch (Exception e) {
throw new ParsingException("Could not get channel id", e);
}
@ -98,7 +95,7 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public String getName() throws ParsingException {
try {
return doc.select("meta[property=\"og:title\"]").first().attr("content");
return initialData.getObject("header").getObject("c4TabbedHeaderRenderer").getString("title");
} catch (Exception e) {
throw new ParsingException("Could not get channel name", e);
}
@ -107,7 +104,10 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public String getAvatarUrl() throws ParsingException {
try {
return doc.select("img[class=\"channel-header-profile-image\"]").first().attr("abs:src");
String url = initialData.getObject("header").getObject("c4TabbedHeaderRenderer").getObject("avatar")
.getArray("thumbnails").getObject(0).getString("url");
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Could not get avatar", e);
}
@ -116,13 +116,18 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public String getBannerUrl() throws ParsingException {
try {
Element el = doc.select("div[id=\"gh-banner\"]").first().select("style").first();
String cssContent = el.html();
String url = "https:" + Parser.matchGroup1("url\\(([^)]+)\\)", cssContent);
String url = null;
try {
url = initialData.getObject("header").getObject("c4TabbedHeaderRenderer").getObject("banner")
.getArray("thumbnails").getObject(0).getString("url");
} catch (Exception ignored) {}
if (url == null || url.contains("s.ytimg.com") || url.contains("default_banner")) {
return null;
}
return url.contains("s.ytimg.com") || url.contains("default_banner") ? null : url;
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Could not get Banner", e);
throw new ParsingException("Could not get banner", e);
}
}
@ -137,25 +142,27 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public long getSubscriberCount() throws ParsingException {
final Element el = doc.select("span[class*=\"yt-subscription-button-subscriber-count\"]").first();
if (el != null) {
String elTitle = el.attr("title");
final JsonObject subscriberInfo = initialData.getObject("header").getObject("c4TabbedHeaderRenderer").getObject("subscriberCountText");
if (subscriberInfo != null) {
try {
return Utils.mixedNumberWordToLong(elTitle);
return Utils.mixedNumberWordToLong(getTextFromObject(subscriberInfo));
} catch (NumberFormatException e) {
throw new ParsingException("Could not get subscriber count", e);
}
} else {
// If the element is null, the channel have the subscriber count disabled
return -1;
// If there's no subscribe button, the channel has the subscriber count disabled
if (initialData.getObject("header").getObject("c4TabbedHeaderRenderer").getObject("subscribeButton") == null) {
return -1;
} else {
return 0;
}
}
}
@Override
public String getDescription() throws ParsingException {
try {
return doc.select("meta[name=\"description\"]").first().attr("content");
return initialData.getObject("metadata").getObject("channelMetadataRenderer").getString("description");
} catch (Exception e) {
throw new ParsingException("Could not get channel description", e);
}
@ -165,8 +172,14 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element ul = doc.select("ul[id=\"browse-items-primary\"]").first();
collectStreamsFrom(collector, ul);
if (getVideoTab() != null) {
JsonArray videos = getVideoTab().getObject("content").getObject("sectionListRenderer").getArray("contents")
.getObject(0).getObject("itemSectionRenderer").getArray("contents").getObject(0)
.getObject("gridRenderer").getArray("items");
collectStreamsFrom(collector, videos);
}
return new InfoItemsPage<>(collector, getNextPageUrl());
}
@ -181,106 +194,81 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
fetchPage();
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
JsonObject ajaxJson;
try {
final String response = getDownloader().get(pageUrl, getExtractorLocalization()).responseBody();
ajaxJson = JsonParser.object().from(response);
} catch (JsonParserException pe) {
throw new ParsingException("Could not parse json data for next streams", pe);
}
final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization());
final Document ajaxHtml = Jsoup.parse(ajaxJson.getString("content_html"), pageUrl);
collectStreamsFrom(collector, ajaxHtml.select("body").first());
JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response")
.getObject("continuationContents").getObject("gridContinuation");
return new InfoItemsPage<>(collector, getNextPageUrlFromAjaxPage(ajaxJson, pageUrl));
collectStreamsFrom(collector, sectionListContinuation.getArray("items"));
return new InfoItemsPage<>(collector, getNextPageUrlFrom(sectionListContinuation.getArray("continuations")));
}
private String getNextPageUrlFromAjaxPage(final JsonObject ajaxJson, final String pageUrl)
throws ParsingException {
String loadMoreHtmlDataRaw = ajaxJson.getString("load_more_widget_html");
if (!loadMoreHtmlDataRaw.isEmpty()) {
return getNextPageUrlFrom(Jsoup.parse(loadMoreHtmlDataRaw, pageUrl));
} else {
return "";
}
private String getNextPageUrlFrom(JsonArray continuations) {
if (continuations == null) return "";
JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
String continuation = nextContinuationData.getString("continuation");
String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return "https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams;
}
private String getNextPageUrlFrom(Document d) throws ParsingException {
try {
Element button = d.select("button[class*=\"yt-uix-load-more\"]").first();
if (button != null) {
return button.attr("abs:data-uix-load-more-href");
} else {
// Sometimes channels are simply so small, they don't have a more streams/videos
return "";
}
} catch (Exception e) {
throw new ParsingException("Could not get next page url", e);
}
}
private void collectStreamsFrom(StreamInfoItemsCollector collector, Element element) throws ParsingException {
private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonArray videos) throws ParsingException {
collector.reset();
final String uploaderName = getName();
final String uploaderUrl = getUrl();
final TimeAgoParser timeAgoParser = getTimeAgoParser();
for (final Element li : element.children()) {
if (li.select("div[class=\"feed-item-dismissable\"]").first() != null) {
collector.commit(new YoutubeStreamInfoItemExtractor(li, timeAgoParser) {
for (Object video : videos) {
if (((JsonObject) video).getObject("gridVideoRenderer") != null) {
collector.commit(new YoutubeStreamInfoItemExtractor(
((JsonObject) video).getObject("gridVideoRenderer"), timeAgoParser) {
@Override
public String getUrl() throws ParsingException {
try {
Element el = li.select("div[class=\"feed-item-dismissable\"]").first();
Element dl = el.select("h3").first().select("a").first();
return dl.attr("abs:href");
} catch (Exception e) {
throw new ParsingException("Could not get web page url for the video", e);
}
}
@Override
public String getName() throws ParsingException {
try {
Element el = li.select("div[class=\"feed-item-dismissable\"]").first();
Element dl = el.select("h3").first().select("a").first();
return dl.text();
} catch (Exception e) {
throw new ParsingException("Could not get title", e);
}
}
@Override
public String getUploaderName() throws ParsingException {
public String getUploaderName() {
return uploaderName;
}
@Override
public String getUploaderUrl() throws ParsingException {
public String getUploaderUrl() {
return uploaderUrl;
}
@Override
public String getThumbnailUrl() throws ParsingException {
try {
String url;
Element te = li.select("span[class=\"yt-thumb-clip\"]").first()
.select("img").first();
url = te.attr("abs:src");
// Sometimes youtube sends links to gif files which somehow seem to not exist
// anymore. Items with such gif also offer a secondary image source. So we are going
// to use that if we've caught such an item.
if (url.contains(".gif")) {
url = te.attr("abs:data-thumb");
}
return url;
} catch (Exception e) {
throw new ParsingException("Could not get thumbnail url", e);
}
}
});
}
}
}
private JsonObject getVideoTab() throws ParsingException {
if (this.videoTab != null) return this.videoTab;
JsonArray tabs = initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer")
.getArray("tabs");
JsonObject videoTab = null;
for (Object tab : tabs) {
if (((JsonObject) tab).getObject("tabRenderer") != null) {
if (((JsonObject) tab).getObject("tabRenderer").getString("title").equals("Videos")) {
videoTab = ((JsonObject) tab).getObject("tabRenderer");
break;
}
}
}
if (videoTab == null) {
throw new ParsingException("Could not find Videos tab");
}
try {
if (getTextFromObject(videoTab.getObject("content").getObject("sectionListRenderer")
.getArray("contents").getObject(0).getObject("itemSectionRenderer")
.getArray("contents").getObject(0).getObject("messageRenderer")
.getObject("text")).equals("This channel has no videos."))
return null;
} catch (Exception ignored) {}
this.videoTab = videoTab;
return videoTab;
}
}
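This is the largest rewrite in the commit: YoutubeChannelExtractor stops scraping the channel HTML with Jsoup and instead reads the JSON returned for `/videos?pbj=1&view=0&flow=grid`, paging through results with YouTube's continuation tokens. The helper below is an illustrative sketch (not part of the commit) of the continuation-URL construction shown in `getNextPageUrlFrom()`; the field names and URL shape are taken from the diff, everything else is an assumption.

```java
import com.grack.nanojson.JsonObject;

class YoutubeContinuationSketch {
    // Builds the browse_ajax URL for the next page of grid items from a
    // "nextContinuationData" object, or returns "" when there is no continuation.
    static String nextPageUrl(JsonObject nextContinuationData) {
        if (nextContinuationData == null) return "";
        String continuation = nextContinuationData.getString("continuation");
        String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
        return "https://www.youtube.com/browse_ajax?ctoken=" + continuation
                + "&continuation=" + continuation
                + "&itct=" + clickTrackingParams;
    }
}
```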

View File

@ -1,12 +1,14 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;
import org.jsoup.nodes.Element;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Utils;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.fixThumbnailUrl;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
/*
* Created by Christian Schabesberger on 12.02.17.
@ -29,87 +31,67 @@ import java.util.regex.Pattern;
*/
public class YoutubeChannelInfoItemExtractor implements ChannelInfoItemExtractor {
private final Element el;
private JsonObject channelInfoItem;
public YoutubeChannelInfoItemExtractor(Element el) {
this.el = el;
public YoutubeChannelInfoItemExtractor(JsonObject channelInfoItem) {
this.channelInfoItem = channelInfoItem;
}
@Override
public String getThumbnailUrl() throws ParsingException {
Element img = el.select("span[class*=\"yt-thumb-simple\"]").first()
.select("img").first();
try {
String url = channelInfoItem.getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url");
String url = img.attr("abs:src");
if (url.contains("gif")) {
url = img.attr("abs:data-thumb");
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Could not get thumbnail url", e);
}
return url;
}
@Override
public String getName() throws ParsingException {
return el.select("a[class*=\"yt-uix-tile-link\"]").first()
.text();
try {
return getTextFromObject(channelInfoItem.getObject("title"));
} catch (Exception e) {
throw new ParsingException("Could not get name", e);
}
}
@Override
public String getUrl() throws ParsingException {
try {
String buttonTrackingUrl = el.select("button[class*=\"yt-uix-button\"]").first()
.attr("abs:data-href");
Pattern channelIdPattern = Pattern.compile("(?:.*?)\\%252Fchannel\\%252F([A-Za-z0-9\\-\\_]+)(?:.*)");
Matcher match = channelIdPattern.matcher(buttonTrackingUrl);
if (match.matches()) {
return YoutubeChannelExtractor.CHANNEL_URL_BASE + match.group(1);
}
} catch(Exception ignored) {}
// fallback method for channels without a "Subscribe" button (or just in case yt changes things)
// provides a url in the form "/user/NAME", inconsistent with the stream and channel extractors: tests will fail
try {
return el.select("a[class*=\"yt-uix-tile-link\"]").first()
.attr("abs:href");
String id = "channel/" + channelInfoItem.getString("channelId");
return YoutubeChannelLinkHandlerFactory.getInstance().getUrl(id);
} catch (Exception e) {
throw new ParsingException("Could not get channel url", e);
throw new ParsingException("Could not get url", e);
}
}
@Override
public long getSubscriberCount() throws ParsingException {
final Element subsEl = el.select("span[class*=\"yt-subscriber-count\"]").first();
if (subsEl != null) {
try {
return Long.parseLong(Utils.removeNonDigitCharacters(subsEl.text()));
} catch (NumberFormatException e) {
throw new ParsingException("Could not get subscriber count", e);
}
} else {
// If the element is null, the channel has the subscriber count disabled
return -1;
try {
String subscribers = getTextFromObject(channelInfoItem.getObject("subscriberCountText"));
return Utils.mixedNumberWordToLong(subscribers);
} catch (Exception e) {
throw new ParsingException("Could not get subscriber count", e);
}
}
@Override
public long getStreamCount() throws ParsingException {
Element metaEl = el.select("ul[class*=\"yt-lockup-meta-info\"]").first();
if (metaEl == null) {
return 0;
} else {
return Long.parseLong(Utils.removeNonDigitCharacters(metaEl.text()));
try {
return Long.parseLong(Utils.removeNonDigitCharacters(getTextFromObject(channelInfoItem.getObject("videoCountText"))));
} catch (Exception e) {
throw new ParsingException("Could not get stream count", e);
}
}
@Override
public String getDescription() throws ParsingException {
Element desEl = el.select("div[class*=\"yt-lockup-description\"]").first();
if (desEl == null) {
return "";
} else {
return desEl.text();
try {
return getTextFromObject(channelInfoItem.getObject("descriptionSnippet"));
} catch (Exception e) {
throw new ParsingException("Could not get description", e);
}
}
}
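// Why the subscriber count now goes through Utils.mixedNumberWordToLong: "subscriberCountText"
// in the JSON is an abbreviated, human-readable string (e.g. "1.23M subscribers"), so stripping
// non-digits alone would give 123. The sketch below only illustrates that kind of parsing under
// those assumptions; it is not the project's actual Utils implementation.
class MixedNumberSketch {
    static long parseAbbreviatedCount(String text) {
        // keep the leading number (with an optional decimal part) and an optional K/M/B suffix
        java.util.regex.Matcher m = java.util.regex.Pattern
                .compile("(\\d+([.,]\\d+)?)\\s*([KMBkmb])?")
                .matcher(text);
        if (!m.find()) throw new NumberFormatException("No number in \"" + text + "\"");
        double value = Double.parseDouble(m.group(1).replace(",", "."));
        String suffix = m.group(3) == null ? "" : m.group(3).toUpperCase(java.util.Locale.ENGLISH);
        switch (suffix) {
            case "K": return (long) (value * 1_000L);
            case "M": return (long) (value * 1_000_000L);
            case "B": return (long) (value * 1_000_000_000L);
            default:  return (long) value;
        }
    }
}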

View File

@ -3,7 +3,6 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.comments.CommentsExtractor;
import org.schabi.newpipe.extractor.comments.CommentsInfoItem;
@ -22,7 +21,9 @@ import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import static java.util.Collections.singletonList;
@ -58,14 +59,14 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
}
private String getNextPageUrl(JsonObject ajaxJson) throws IOException, ParsingException {
JsonArray arr;
try {
arr = JsonUtils.getArray(ajaxJson, "response.continuationContents.commentSectionContinuation.continuations");
} catch (Exception e) {
return "";
}
if(arr.isEmpty()) {
if (arr.isEmpty()) {
return "";
}
String continuation;
@ -107,11 +108,11 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
}
private void collectCommentsFrom(CommentsInfoItemsCollector collector, JsonObject ajaxJson) throws ParsingException {
JsonArray contents;
try {
contents = JsonUtils.getArray(ajaxJson, "response.continuationContents.commentSectionContinuation.items");
}catch(Exception e) {
} catch (Exception e) {
//no comments
return;
}
@ -119,12 +120,12 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
List<Object> comments;
try {
comments = JsonUtils.getValues(contents, "commentThreadRenderer.comment.commentRenderer");
}catch(Exception e) {
} catch (Exception e) {
throw new ParsingException("unable to get parse youtube comments", e);
}
for(Object c: comments) {
if(c instanceof JsonObject) {
for (Object c : comments) {
if (c instanceof JsonObject) {
CommentsInfoItemExtractor extractor = new YoutubeCommentsInfoItemExtractor((JsonObject) c, getUrl(), getTimeAgoParser());
collector.commit(extractor);
}
@ -132,7 +133,7 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
}
private void fetchTitle(JsonArray contents) {
if(null == title) {
if (title == null) {
try {
title = getYoutubeText(JsonUtils.getObject(contents.getObject(0), "commentThreadRenderer.commentTargetTitle"));
} catch (Exception e) {
@ -190,7 +191,7 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
int endIndex = doc.indexOf(end, beginIndex);
return doc.substring(beginIndex, endIndex);
}
public static String getYoutubeText(@Nonnull JsonObject object) throws ParsingException {
try {
return JsonUtils.getString(object, "simpleText");
@ -198,7 +199,7 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
try {
JsonArray arr = JsonUtils.getArray(object, "runs");
String result = "";
for(int i=0; i<arr.size();i++) {
for (int i = 0; i < arr.size(); i++) {
result = result + JsonUtils.getString(arr.getObject(i), "text");
}
return result;
@ -207,5 +208,5 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
}
}
}
}
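// Usage sketch for the "simpleText"/"runs" handling in getYoutubeText() above: YouTube text
// objects either carry a plain "simpleText" string or a "runs" array whose "text" pieces have
// to be concatenated. The two JSON payloads below are made-up examples, not real responses.
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.services.youtube.extractors.YoutubeCommentsExtractor;

class YoutubeTextSketch {
    public static void main(String[] args) throws Exception {
        JsonObject simple = JsonParser.object().from("{\"simpleText\":\"Pinned comment\"}");
        JsonObject runs = JsonParser.object().from(
                "{\"runs\":[{\"text\":\"Part one \"},{\"text\":\"and part two\"}]}");
        System.out.println(YoutubeCommentsExtractor.getYoutubeText(simple)); // Pinned comment
        System.out.println(YoutubeCommentsExtractor.getYoutubeText(runs));   // Part one and part two
    }
}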

View File

@ -4,8 +4,8 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.comments.CommentsInfoItemExtractor;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Utils;

View File

@ -1,34 +1,33 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.playlist.PlaylistExtractor;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.stream.StreamType;
import org.schabi.newpipe.extractor.utils.Utils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.fixThumbnailUrl;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getJsonResponse;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getUrlFromNavigationEndpoint;
@SuppressWarnings("WeakerAccess")
public class YoutubePlaylistExtractor extends PlaylistExtractor {
private Document doc;
private JsonObject initialData;
private JsonObject playlistInfo;
public YoutubePlaylistExtractor(StreamingService service, ListLinkHandler linkHandler) {
super(service, linkHandler);
@ -36,21 +35,66 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
final String url = getUrl();
final Response response = downloader.get(url, getExtractorLocalization());
doc = YoutubeParsingHelper.parseAndCheckPage(url, response);
final String url = getUrl() + "&pbj=1";
final JsonArray ajaxJson = getJsonResponse(url, getExtractorLocalization());
initialData = ajaxJson.getObject(1).getObject("response");
YoutubeParsingHelper.defaultAlertsCheck(initialData);
playlistInfo = getPlaylistInfo();
}
private JsonObject getUploaderInfo() throws ParsingException {
JsonArray items = initialData.getObject("sidebar").getObject("playlistSidebarRenderer").getArray("items");
try {
JsonObject uploaderInfo = items.getObject(1).getObject("playlistSidebarSecondaryInfoRenderer")
.getObject("videoOwner").getObject("videoOwnerRenderer");
if (uploaderInfo != null) {
return uploaderInfo;
}
} catch (Exception ignored) {}
// we might want to create a loop here instead of using duplicated code
try {
JsonObject uploaderInfo = items.getObject(items.size() - 1).getObject("playlistSidebarSecondaryInfoRenderer")
.getObject("videoOwner").getObject("videoOwnerRenderer");
if (uploaderInfo != null) {
return uploaderInfo;
}
} catch (Exception e) {
throw new ParsingException("Could not get uploader info", e);
}
throw new ParsingException("Could not get uploader info");
}
private JsonObject getPlaylistInfo() throws ParsingException {
try {
return initialData.getObject("sidebar").getObject("playlistSidebarRenderer").getArray("items")
.getObject(0).getObject("playlistSidebarPrimaryInfoRenderer");
} catch (Exception e) {
throw new ParsingException("Could not get PlaylistInfo", e);
}
}
@Override
public String getNextPageUrl() throws ExtractionException {
return getNextPageUrlFrom(doc);
public String getNextPageUrl() {
return getNextPageUrlFrom(initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer")
.getArray("tabs").getObject(0).getObject("tabRenderer").getObject("content")
.getObject("sectionListRenderer").getArray("contents").getObject(0)
.getObject("itemSectionRenderer").getArray("contents").getObject(0)
.getObject("playlistVideoListRenderer").getArray("continuations"));
}
@Nonnull
@Override
public String getName() throws ParsingException {
try {
return doc.select("div[id=pl-header] h1[class=pl-header-title]").first().text();
String name = getTextFromObject(playlistInfo.getObject("title"));
if (name != null) return name;
} catch (Exception ignored) {}
try {
return initialData.getObject("microformat").getObject("microformatDataRenderer").getString("title");
} catch (Exception e) {
throw new ParsingException("Could not get playlist name", e);
}
@ -58,25 +102,35 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public String getThumbnailUrl() throws ParsingException {
String url = null;
try {
return doc.select("div[id=pl-header] div[class=pl-header-thumb] img").first().attr("abs:src");
} catch (Exception e) {
throw new ParsingException("Could not get playlist thumbnail", e);
url = playlistInfo.getObject("thumbnailRenderer").getObject("playlistVideoThumbnailRenderer")
.getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url");
} catch (Exception ignored) {}
if (url == null) {
try {
url = initialData.getObject("microformat").getObject("microformatDataRenderer").getObject("thumbnail")
.getArray("thumbnails").getObject(0).getString("url");
} catch (Exception ignored) {}
if (url == null) throw new ParsingException("Could not get playlist thumbnail");
}
return fixThumbnailUrl(url);
}
@Override
public String getBannerUrl() {
return ""; // Banner can't be handled by frontend right now.
// Whoever is willing to implement this should also implement this in the fornt end
// Whoever is willing to implement this should also implement it in the frontend.
}
@Override
public String getUploaderUrl() throws ParsingException {
try {
return YoutubeChannelExtractor.CHANNEL_URL_BASE +
doc.select("button[class*=\"yt-uix-subscription-button\"]")
.first().attr("data-channel-external-id");
return getUrlFromNavigationEndpoint(getUploaderInfo().getObject("navigationEndpoint"));
} catch (Exception e) {
throw new ParsingException("Could not get playlist uploader url", e);
}
@ -85,7 +139,7 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public String getUploaderName() throws ParsingException {
try {
return doc.select("span[class=\"qualified-channel-title-text\"]").first().select("a").first().text();
return getTextFromObject(getUploaderInfo().getObject("title"));
} catch (Exception e) {
throw new ParsingException("Could not get playlist uploader name", e);
}
@ -94,7 +148,9 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public String getUploaderAvatarUrl() throws ParsingException {
try {
return doc.select("div[id=gh-banner] img[class=channel-header-profile-image]").first().attr("abs:src");
String url = getUploaderInfo().getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url");
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Could not get playlist uploader avatar", e);
}
@ -102,33 +158,26 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public long getStreamCount() throws ParsingException {
String input;
try {
input = doc.select("ul[class=\"pl-header-details\"] li").get(1).text();
} catch (IndexOutOfBoundsException e) {
String viewsText = getTextFromObject(getPlaylistInfo().getArray("stats").getObject(0));
return Long.parseLong(Utils.removeNonDigitCharacters(viewsText));
} catch (Exception e) {
throw new ParsingException("Could not get video count from playlist", e);
}
try {
return Long.parseLong(Utils.removeNonDigitCharacters(input));
} catch (NumberFormatException e) {
// When there are no videos in a playlist, there's no number in the "innerHtml":
// all characters that are not digits are removed, so we would try to parse an empty string
if (!input.isEmpty()) {
return 0;
} else {
throw new ParsingException("Could not handle input: " + input, e);
}
}
}
@Nonnull
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws ExtractionException {
public InfoItemsPage<StreamInfoItem> getInitialPage() {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Element tbody = doc.select("tbody[id=\"pl-load-more-destination\"]").first();
collectStreamsFrom(collector, tbody);
JsonArray videos = initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer")
.getArray("tabs").getObject(0).getObject("tabRenderer").getObject("content")
.getObject("sectionListRenderer").getArray("contents").getObject(0)
.getObject("itemSectionRenderer").getArray("contents").getObject(0)
.getObject("playlistVideoListRenderer").getArray("contents");
collectStreamsFrom(collector, videos);
return new InfoItemsPage<>(collector, getNextPageUrl());
}
@ -139,155 +188,42 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
}
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
JsonObject pageJson;
try {
final String responseBody = getDownloader().get(pageUrl, getExtractorLocalization()).responseBody();
pageJson = JsonParser.object().from(responseBody);
} catch (JsonParserException pe) {
throw new ParsingException("Could not parse ajax json", pe);
}
final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization());
final Document pageHtml = Jsoup.parse("<table><tbody id=\"pl-load-more-destination\">"
+ pageJson.getString("content_html")
+ "</tbody></table>", pageUrl);
JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response")
.getObject("continuationContents").getObject("playlistVideoListContinuation");
collectStreamsFrom(collector, pageHtml.select("tbody[id=\"pl-load-more-destination\"]").first());
collectStreamsFrom(collector, sectionListContinuation.getArray("contents"));
return new InfoItemsPage<>(collector, getNextPageUrlFromAjax(pageJson, pageUrl));
return new InfoItemsPage<>(collector, getNextPageUrlFrom(sectionListContinuation.getArray("continuations")));
}
private String getNextPageUrlFromAjax(final JsonObject pageJson, final String pageUrl)
throws ParsingException{
String nextPageHtml = pageJson.getString("load_more_widget_html");
if (!nextPageHtml.isEmpty()) {
return getNextPageUrlFrom(Jsoup.parse(nextPageHtml, pageUrl));
} else {
private String getNextPageUrlFrom(JsonArray continuations) {
if (continuations == null) {
return "";
}
JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
String continuation = nextContinuationData.getString("continuation");
String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return "https://www.youtube.com/browse_ajax?ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams;
}
private String getNextPageUrlFrom(Document d) throws ParsingException {
try {
Element button = d.select("button[class*=\"yt-uix-load-more\"]").first();
if (button != null) {
return button.attr("abs:data-uix-load-more-href");
} else {
// Sometimes playlists are simply so small that there is no "load more" button for additional streams/videos
return "";
}
} catch (Exception e) {
throw new ParsingException("could not get next streams' url", e);
}
}
private void collectStreamsFrom(@Nonnull StreamInfoItemsCollector collector, @Nullable Element element) {
private void collectStreamsFrom(StreamInfoItemsCollector collector, JsonArray videos) {
collector.reset();
if (element == null) {
return;
}
final LinkHandlerFactory streamLinkHandlerFactory = getService().getStreamLHFactory();
final TimeAgoParser timeAgoParser = getTimeAgoParser();
for (final Element li : element.children()) {
if(isDeletedItem(li)) {
continue;
for (Object video : videos) {
if (((JsonObject) video).getObject("playlistVideoRenderer") != null) {
collector.commit(new YoutubeStreamInfoItemExtractor(((JsonObject) video).getObject("playlistVideoRenderer"), timeAgoParser) {
@Override
public long getViewCount() {
return -1;
}
});
}
collector.commit(new YoutubeStreamInfoItemExtractor(li, timeAgoParser) {
public Element uploaderLink;
@Override
public boolean isAd() {
return false;
}
@Override
public String getUrl() throws ParsingException {
try {
return streamLinkHandlerFactory.fromId(li.attr("data-video-id")).getUrl();
} catch (Exception e) {
throw new ParsingException("Could not get web page url for the video", e);
}
}
@Override
public String getName() throws ParsingException {
try {
return li.attr("data-title");
} catch (Exception e) {
throw new ParsingException("Could not get title", e);
}
}
@Override
public long getDuration() throws ParsingException {
try {
if (getStreamType() == StreamType.LIVE_STREAM) return -1;
Element first = li.select("div[class=\"timestamp\"] span").first();
if (first == null) {
// Video unavailable (private, deleted, etc.). This happens specifically with playlists,
// because in other cases such videos don't even show up
return -1;
}
return YoutubeParsingHelper.parseDurationString(first.text());
} catch (Exception e) {
throw new ParsingException("Could not get duration" + getUrl(), e);
}
}
private Element getUploaderLink() {
// should always be present since we filter deleted items
if(uploaderLink == null) {
uploaderLink = li.select("div[class=pl-video-owner] a").first();
}
return uploaderLink;
}
@Override
public String getUploaderName() throws ParsingException {
return getUploaderLink().text();
}
@Override
public String getUploaderUrl() throws ParsingException {
// this url is not always in the form "/channel/..."
// sometimes Youtube provides urls in the form "/user/..."
return getUploaderLink().attr("abs:href");
}
@Override
public String getTextualUploadDate() throws ParsingException {
return "";
}
@Override
public long getViewCount() throws ParsingException {
return -1;
}
@Override
public String getThumbnailUrl() throws ParsingException {
try {
return "https://i.ytimg.com/vi/" + streamLinkHandlerFactory.fromUrl(getUrl()).getId() + "/hqdefault.jpg";
} catch (Exception e) {
throw new ParsingException("Could not get thumbnail url", e);
}
}
});
}
}
/**
* Check if the playlist item is deleted
* @param li the list item
* @return true if the item is deleted
*/
private boolean isDeletedItem(Element li) {
return li.select("div[class=pl-video-owner] a").isEmpty();
}
}
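// The pagination above hinges on a "continuations" array whose first element carries a
// "nextContinuationData" object with "continuation" and "clickTrackingParams". A minimal
// offline example of the assumed shape, run through the same ctoken/itct URL construction
// as getNextPageUrlFrom(...); the token values are made up:
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;

class ContinuationSketch {
    public static void main(String[] args) throws Exception {
        JsonArray continuations = JsonParser.array().from(
                "[{\"nextContinuationData\":{\"continuation\":\"4qmFsgExample\","
                        + "\"clickTrackingParams\":\"CAoExample\"}}]");
        JsonObject next = continuations.getObject(0).getObject("nextContinuationData");
        String url = "https://www.youtube.com/browse_ajax?ctoken=" + next.getString("continuation")
                + "&continuation=" + next.getString("continuation")
                + "&itct=" + next.getString("clickTrackingParams");
        // getPage(url) then unwraps response -> continuationContents -> playlistVideoListContinuation
        System.out.println(url);
    }
}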

View File

@ -1,97 +1,68 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;
import org.jsoup.nodes.Element;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItemExtractor;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubePlaylistLinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Utils;
public class YoutubePlaylistInfoItemExtractor implements PlaylistInfoItemExtractor {
private final Element el;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.fixThumbnailUrl;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
public YoutubePlaylistInfoItemExtractor(Element el) {
this.el = el;
public class YoutubePlaylistInfoItemExtractor implements PlaylistInfoItemExtractor {
private JsonObject playlistInfoItem;
public YoutubePlaylistInfoItemExtractor(JsonObject playlistInfoItem) {
this.playlistInfoItem = playlistInfoItem;
}
@Override
public String getThumbnailUrl() throws ParsingException {
String url;
try {
Element te = el.select("div[class=\"yt-thumb video-thumb\"]").first()
.select("img").first();
url = te.attr("abs:src");
String url = playlistInfoItem.getArray("thumbnails").getObject(0)
.getArray("thumbnails").getObject(0).getString("url");
if (url.contains(".gif")) {
url = te.attr("abs:data-thumb");
}
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Failed to extract playlist thumbnail url", e);
throw new ParsingException("Could not get thumbnail url", e);
}
return url;
}
@Override
public String getName() throws ParsingException {
String name;
try {
final Element title = el.select("[class=\"yt-lockup-title\"]").first()
.select("a").first();
name = title == null ? "" : title.text();
return getTextFromObject(playlistInfoItem.getObject("title"));
} catch (Exception e) {
throw new ParsingException("Failed to extract playlist name", e);
throw new ParsingException("Could not get name", e);
}
return name;
}
@Override
public String getUrl() throws ParsingException {
try {
final Element a = el.select("div[class=\"yt-lockup-meta\"]")
.select("ul[class=\"yt-lockup-meta-info\"]")
.select("li").select("a").first();
if(a != null) {
return a.attr("abs:href");
}
// this is for yt premium playlists
return el.select("h3[class=\"yt-lockup-title\"").first()
.select("a").first()
.attr("abs:href");
String id = playlistInfoItem.getString("playlistId");
return YoutubePlaylistLinkHandlerFactory.getInstance().getUrl(id);
} catch (Exception e) {
throw new ParsingException("Failed to extract playlist url", e);
throw new ParsingException("Could not get url", e);
}
}
@Override
public String getUploaderName() throws ParsingException {
String name;
try {
final Element div = el.select("div[class=\"yt-lockup-byline\"]").first()
.select("a").first();
name = div.text();
return getTextFromObject(playlistInfoItem.getObject("longBylineText"));
} catch (Exception e) {
throw new ParsingException("Failed to extract playlist uploader", e);
throw new ParsingException("Could not get uploader name", e);
}
return name;
}
@Override
public long getStreamCount() throws ParsingException {
try {
final Element count = el.select("span[class=\"formatted-video-count-label\"]").first()
.select("b").first();
return count == null ? 0 : Long.parseLong(Utils.removeNonDigitCharacters(count.text()));
return Long.parseLong(Utils.removeNonDigitCharacters(playlistInfoItem.getString("videoCount")));
} catch (Exception e) {
throw new ParsingException("Failed to extract playlist stream count", e);
throw new ParsingException("Could not get stream count", e);
}
}
}
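// fixThumbnailUrl(...) is applied to every thumbnail in this commit because the JSON frequently
// returns protocol-relative links such as "//i.ytimg.com/...". The sketch below only illustrates
// that kind of normalisation and is not the project's actual YoutubeParsingHelper implementation;
// the site-relative branch in particular is an assumption.
class ThumbnailUrlSketch {
    static String normalizeThumbnailUrl(String url) {
        if (url.startsWith("//")) {
            return "https:" + url;                   // protocol-relative
        }
        if (url.startsWith("/")) {
            return "https://www.youtube.com" + url;  // site-relative (assumption)
        }
        return url;                                  // already absolute
    }
}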

View File

@ -1,26 +1,24 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.utils.Parser;
import java.io.IOException;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getJsonResponse;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
/*
* Created by Christian Schabesberger on 22.07.2018
@ -43,8 +41,7 @@ import java.net.URL;
*/
public class YoutubeSearchExtractor extends SearchExtractor {
private Document doc;
private JsonObject initialData;
public YoutubeSearchExtractor(StreamingService service, SearchQueryHandler linkHandler) {
super(service, linkHandler);
@ -52,9 +49,11 @@ public class YoutubeSearchExtractor extends SearchExtractor {
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
final String url = getUrl();
final Response response = downloader.get(url, getExtractorLocalization());
doc = YoutubeParsingHelper.parseAndCheckPage(url, response);
final String url = getUrl() + "&pbj=1";
final JsonArray ajaxJson = getJsonResponse(url, getExtractorLocalization());
initialData = ajaxJson.getObject(1).getObject("response");
}
@Nonnull
@ -64,81 +63,85 @@ public class YoutubeSearchExtractor extends SearchExtractor {
}
@Override
public String getSearchSuggestion() {
final Element el = doc.select("div[class*=\"spell-correction\"]").first();
if (el != null) {
return el.select("a").first().text();
} else {
public String getSearchSuggestion() throws ParsingException {
JsonObject showingResultsForRenderer = initialData.getObject("contents")
.getObject("twoColumnSearchResultsRenderer").getObject("primaryContents")
.getObject("sectionListRenderer").getArray("contents").getObject(0)
.getObject("itemSectionRenderer").getArray("contents").getObject(0)
.getObject("showingResultsForRenderer");
if (showingResultsForRenderer == null) {
return "";
} else {
return getTextFromObject(showingResultsForRenderer.getObject("correctedQuery"));
}
}
@Nonnull
@Override
public InfoItemsPage<InfoItem> getInitialPage() throws ExtractionException {
return new InfoItemsPage<>(collectItems(doc), getNextPageUrl());
InfoItemsSearchCollector collector = getInfoItemSearchCollector();
JsonArray sections = initialData.getObject("contents").getObject("twoColumnSearchResultsRenderer")
.getObject("primaryContents").getObject("sectionListRenderer").getArray("contents");
for (Object section : sections) {
collectStreamsFrom(collector, ((JsonObject) section).getObject("itemSectionRenderer").getArray("contents"));
}
return new InfoItemsPage<>(collector, getNextPageUrl());
}
@Override
public String getNextPageUrl() throws ExtractionException {
return getUrl() + "&page=" + 2;
return getNextPageUrlFrom(initialData.getObject("contents").getObject("twoColumnSearchResultsRenderer")
.getObject("primaryContents").getObject("sectionListRenderer").getArray("contents")
.getObject(0).getObject("itemSectionRenderer").getArray("continuations"));
}
@Override
public InfoItemsPage<InfoItem> getPage(String pageUrl) throws IOException, ExtractionException {
final String response = getDownloader().get(pageUrl, getExtractorLocalization()).responseBody();
doc = Jsoup.parse(response, pageUrl);
return new InfoItemsPage<>(collectItems(doc), getNextPageUrlFromCurrentUrl(pageUrl));
}
private String getNextPageUrlFromCurrentUrl(String currentUrl)
throws MalformedURLException, UnsupportedEncodingException {
final int pageNr = Integer.parseInt(
Parser.compatParseMap(
new URL(currentUrl)
.getQuery())
.get("page"));
return currentUrl.replace("&page=" + pageNr,
"&page=" + Integer.toString(pageNr + 1));
}
private InfoItemsSearchCollector collectItems(Document doc) throws NothingFoundException {
InfoItemsSearchCollector collector = getInfoItemSearchCollector();
collector.reset();
Element list = doc.select("ol[class=\"item-section\"]").first();
final TimeAgoParser timeAgoParser = getTimeAgoParser();
for (Element item : list.children()) {
/* First we need to determine which kind of item we are working with.
Youtube depicts five different kinds of items on its search result page. These are
regular videos, playlists, channels, two types of video suggestions, and a "no video
found" item. Since we only want videos, we need to filter out all the others.
An example for this can be seen here:
https://www.youtube.com/results?search_query=asdf&page=1
We already applied a filter to the url, so we don't need to care about channels and
playlists now.
*/
Element el;
if ((el = item.select("div[class*=\"search-message\"]").first()) != null) {
throw new NothingFoundException(el.text());
// video item type
} else if ((el = item.select("div[class*=\"yt-lockup-video\"]").first()) != null) {
collector.commit(new YoutubeStreamInfoItemExtractor(el, timeAgoParser));
} else if ((el = item.select("div[class*=\"yt-lockup-channel\"]").first()) != null) {
collector.commit(new YoutubeChannelInfoItemExtractor(el));
} else if ((el = item.select("div[class*=\"yt-lockup-playlist\"]").first()) != null &&
item.select(".yt-pl-icon-mix").isEmpty()) {
collector.commit(new YoutubePlaylistInfoItemExtractor(el));
}
if (pageUrl == null || pageUrl.isEmpty()) {
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
}
return collector;
InfoItemsSearchCollector collector = getInfoItemSearchCollector();
final JsonArray ajaxJson = getJsonResponse(pageUrl, getExtractorLocalization());
JsonObject itemSectionRenderer = ajaxJson.getObject(1).getObject("response")
.getObject("continuationContents").getObject("itemSectionContinuation");
collectStreamsFrom(collector, itemSectionRenderer.getArray("contents"));
return new InfoItemsPage<>(collector, getNextPageUrlFrom(itemSectionRenderer.getArray("continuations")));
}
private void collectStreamsFrom(InfoItemsSearchCollector collector, JsonArray videos) throws NothingFoundException, ParsingException {
collector.reset();
final TimeAgoParser timeAgoParser = getTimeAgoParser();
for (Object item : videos) {
if (((JsonObject) item).getObject("backgroundPromoRenderer") != null) {
throw new NothingFoundException(getTextFromObject(((JsonObject) item)
.getObject("backgroundPromoRenderer").getObject("bodyText")));
} else if (((JsonObject) item).getObject("videoRenderer") != null) {
collector.commit(new YoutubeStreamInfoItemExtractor(((JsonObject) item).getObject("videoRenderer"), timeAgoParser));
} else if (((JsonObject) item).getObject("channelRenderer") != null) {
collector.commit(new YoutubeChannelInfoItemExtractor(((JsonObject) item).getObject("channelRenderer")));
} else if (((JsonObject) item).getObject("playlistRenderer") != null) {
collector.commit(new YoutubePlaylistInfoItemExtractor(((JsonObject) item).getObject("playlistRenderer")));
}
}
}
private String getNextPageUrlFrom(JsonArray continuations) throws ParsingException {
if (continuations == null) {
return "";
}
JsonObject nextContinuationData = continuations.getObject(0).getObject("nextContinuationData");
String continuation = nextContinuationData.getString("continuation");
String clickTrackingParams = nextContinuationData.getString("clickTrackingParams");
return getUrl() + "&pbj=1&ctoken=" + continuation + "&continuation=" + continuation
+ "&itct=" + clickTrackingParams;
}
}
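// Both the search and the playlist extractor (and the stream extractor below) now request the
// page with "&pbj=1", which makes YouTube answer with a JSON array instead of HTML; element 1
// of that array carries the "response" object the extractors keep as initialData. A minimal
// offline illustration of that unwrapping with a made-up payload (the real request goes through
// YoutubeParsingHelper.getJsonResponse):
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;

class PbjResponseSketch {
    public static void main(String[] args) throws Exception {
        String body = "[{\"page\":\"search\"},{\"response\":{\"contents\":{}}}]";
        JsonArray ajaxJson = JsonParser.array().from(body);
        JsonObject initialData = ajaxJson.getObject(1).getObject("response");
        System.out.println(initialData.has("contents")); // true
    }
}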

View File

@ -3,11 +3,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.mozilla.javascript.Context;
import org.mozilla.javascript.Function;
import org.mozilla.javascript.ScriptableObject;
@ -15,31 +11,52 @@ import org.schabi.newpipe.extractor.MediaFormat;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Request;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.localization.TimeAgoPatternsManager;
import org.schabi.newpipe.extractor.services.youtube.ItagItem;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeChannelLinkHandlerFactory;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.stream.*;
import org.schabi.newpipe.extractor.stream.AudioStream;
import org.schabi.newpipe.extractor.stream.Description;
import org.schabi.newpipe.extractor.stream.Frameset;
import org.schabi.newpipe.extractor.stream.Stream;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.stream.StreamType;
import org.schabi.newpipe.extractor.stream.SubtitlesStream;
import org.schabi.newpipe.extractor.stream.VideoStream;
import org.schabi.newpipe.extractor.utils.JsonUtils;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.fixThumbnailUrl;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getJsonResponse;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getUrlFromNavigationEndpoint;
/*
* Created by Christian Schabesberger on 06.08.15.
@ -62,8 +79,6 @@ import java.util.regex.Pattern;
*/
public class YoutubeStreamExtractor extends StreamExtractor {
private static final String TAG = YoutubeStreamExtractor.class.getSimpleName();
/*//////////////////////////////////////////////////////////////////////////
// Exceptions
//////////////////////////////////////////////////////////////////////////*/
@ -74,26 +89,22 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
}
public class SubtitlesException extends ContentNotAvailableException {
SubtitlesException(String message, Throwable cause) {
super(message, cause);
}
}
/*//////////////////////////////////////////////////////////////////////////*/
private Document doc;
private JsonArray initialAjaxJson;
@Nullable
private JsonObject playerArgs;
@Nonnull
private final Map<String, String> videoInfoPage = new HashMap<>();
private JsonObject playerResponse;
private JsonObject initialData;
private JsonObject videoPrimaryInfoRenderer;
private JsonObject videoSecondaryInfoRenderer;
private int ageLimit;
@Nonnull
private List<SubtitlesInfo> subtitlesInfos = new ArrayList<>();
private boolean isAgeRestricted;
public YoutubeStreamExtractor(StreamingService service, LinkHandler linkHandler) {
super(service, linkHandler);
}
@ -106,21 +117,21 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public String getName() throws ParsingException {
assertPageFetched();
try {
return playerResponse.getObject("videoDetails").getString("title");
String title = null;
} catch (Exception e) {
// fallback HTML method
String name = null;
try {
title = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("title"));
} catch (Exception ignored) {}
if (title == null) {
try {
name = doc.select("meta[name=title]").attr(CONTENT);
title = playerResponse.getObject("videoDetails").getString("title");
} catch (Exception ignored) {}
if (name == null) {
throw new ParsingException("Could not get name", e);
}
return name;
if (title == null) throw new ParsingException("Could not get name");
}
return title;
}
@Override
@ -130,18 +141,39 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
try {
return playerResponse.getObject("microformat").getObject("playerMicroformatRenderer").getString("publishDate");
} catch (Exception e) {
String uploadDate = null;
try {
uploadDate = doc.select("meta[itemprop=datePublished]").attr(CONTENT);
} catch (Exception ignored) {}
if (uploadDate == null) {
throw new ParsingException("Could not get upload date", e);
JsonObject micro = playerResponse.getObject("microformat").getObject("playerMicroformatRenderer");
if (micro.getString("uploadDate") != null && !micro.getString("uploadDate").isEmpty()) {
return micro.getString("uploadDate");
}
return uploadDate;
}
if (micro.getString("publishDate") != null && !micro.getString("publishDate").isEmpty()) {
return micro.getString("publishDate");
}
} catch (Exception ignored) {}
try {
if (getTextFromObject(getVideoPrimaryInfoRenderer().getObject("dateText")).startsWith("Premiered")) {
String time = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("dateText")).substring(10);
try { // Premiered 20 hours ago
TimeAgoParser timeAgoParser = TimeAgoPatternsManager.getTimeAgoParserFor(Localization.fromLocalizationCode("en"));
Calendar parsedTime = timeAgoParser.parse(time).date();
return new SimpleDateFormat("yyyy-MM-dd").format(parsedTime.getTime());
} catch (Exception ignored) {}
try { // Premiered Feb 21, 2020
Date d = new SimpleDateFormat("MMM dd, YYYY", Locale.ENGLISH).parse(time);
return new SimpleDateFormat("yyyy-MM-dd").format(d.getTime());
} catch (Exception ignored) {}
}
} catch (Exception ignored) {}
try {
// TODO this parses English formatted dates only, we need a better approach to parse the textual date
Date d = new SimpleDateFormat("dd MMM yyyy", Locale.ENGLISH).parse(
getTextFromObject(getVideoPrimaryInfoRenderer().getObject("dateText")));
return new SimpleDateFormat("yyyy-MM-dd").format(d);
} catch (Exception ignored) {}
throw new ParsingException("Could not get upload date");
}
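// Worked example of the "Premiered ..." branches above (dates made up): for a dateText of
// "Premiered Feb 21, 2020", substring(10) yields "Feb 21, 2020", which the second branch parses
// with SimpleDateFormat("MMM dd, yyyy", Locale.ENGLISH) and reformats to "2020-02-21";
// "Premiered 20 hours ago" instead goes through the English TimeAgoParser in the first branch.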
@Override
@ -162,122 +194,38 @@ public class YoutubeStreamExtractor extends StreamExtractor {
try {
JsonArray thumbnails = playerResponse.getObject("videoDetails").getObject("thumbnail").getArray("thumbnails");
// the last thumbnail is the one with the highest resolution
return thumbnails.getObject(thumbnails.size() - 1).getString("url");
String url = thumbnails.getObject(thumbnails.size() - 1).getString("url");
return fixThumbnailUrl(url);
} catch (Exception e) {
String url = null;
try {
url = doc.select("link[itemprop=\"thumbnailUrl\"]").first().attr("abs:href");
} catch (Exception ignored) {}
if (url == null) {
throw new ParsingException("Could not get thumbnail url", e);
}
return url;
throw new ParsingException("Could not get thumbnail url");
}
}
@Nonnull
@Override
public String getDescription() throws ParsingException {
public Description getDescription() throws ParsingException {
assertPageFetched();
// description with more info on links
try {
// first try to get html-formatted description
return parseHtmlAndGetFullLinks(doc.select("p[id=\"eow-description\"]").first().html());
} catch (Exception e) {
try {
// fallback to raw non-html description
return playerResponse.getObject("videoDetails").getString("shortDescription");
} catch (Exception ignored) {
throw new ParsingException("Could not get the description", e);
}
String description = getTextFromObject(getVideoSecondaryInfoRenderer().getObject("description"), true);
return new Description(description, Description.HTML);
} catch (Exception ignored) { }
// raw non-html description
try {
return new Description(playerResponse.getObject("videoDetails").getString("shortDescription"), Description.PLAIN_TEXT);
} catch (Exception ignored) {
throw new ParsingException("Could not get description");
}
}
// onclick="yt.www.watch.player.seekTo(0*3600+00*60+00);return false;"
// :00 is NOT recognized as a timestamp in description or comments.
// 0:00 is recognized in both description and comments.
// https://www.youtube.com/watch?v=4cccfDXu1vA
private final static Pattern DESCRIPTION_TIMESTAMP_ONCLICK_REGEX = Pattern.compile(
"seekTo\\("
+ "(?:(\\d+)\\*3600\\+)?" // hours?
+ "(\\d+)\\*60\\+" // minutes
+ "(\\d+)" // seconds
+ "\\)");
@SafeVarargs
private static <T> T coalesce(T... args) {
for (T arg : args) {
if (arg != null) return arg;
}
throw new IllegalArgumentException("all arguments to coalesce() were null");
}
private String parseHtmlAndGetFullLinks(String descriptionHtml)
throws MalformedURLException, UnsupportedEncodingException, ParsingException {
final Document description = Jsoup.parse(descriptionHtml, getUrl());
for(Element a : description.select("a")) {
final String rawUrl = a.attr("abs:href");
final URL redirectLink = new URL(rawUrl);
final Matcher onClickTimestamp;
final String queryString;
if ((onClickTimestamp = DESCRIPTION_TIMESTAMP_ONCLICK_REGEX.matcher(a.attr("onclick")))
.find()) {
a.removeAttr("onclick");
String hours = coalesce(onClickTimestamp.group(1), "0");
String minutes = onClickTimestamp.group(2);
String seconds = onClickTimestamp.group(3);
int timestamp = 0;
timestamp += Integer.parseInt(hours) * 3600;
timestamp += Integer.parseInt(minutes) * 60;
timestamp += Integer.parseInt(seconds);
String setTimestamp = "&t=" + timestamp;
// Even after clicking https://youtu.be/...?t=6,
// getUrl() is https://www.youtube.com/watch?v=..., never youtu.be, never &t=.
a.attr("href", getUrl() + setTimestamp);
} else if((queryString = redirectLink.getQuery()) != null) {
// if the query string is null we are not dealing with a redirect link,
// so we don't need to override it.
final String link =
Parser.compatParseMap(queryString).get("q");
if(link != null) {
// if link is null the a tag is a hashtag.
// They refer to the youtube search. We do not handle them.
a.text(link);
a.attr("href", link);
} else if(redirectLink.toString().contains("https://www.youtube.com/")) {
a.text(redirectLink.toString());
a.attr("href", redirectLink.toString());
}
} else if(redirectLink.toString().contains("https://www.youtube.com/")) {
descriptionHtml = descriptionHtml.replace(rawUrl, redirectLink.toString());
a.text(redirectLink.toString());
a.attr("href", redirectLink.toString());
}
}
return description.select("body").first().html();
}
@Override
public int getAgeLimit() throws ParsingException {
assertPageFetched();
if (!isAgeRestricted) {
return NO_AGE_LIMIT;
}
try {
return Integer.valueOf(doc.select("meta[property=\"og:restrictions:age\"]")
.attr(CONTENT).replace("+", ""));
} catch (Exception e) {
throw new ParsingException("Could not get age restriction");
}
public int getAgeLimit() {
if (initialData == null || initialData.isEmpty()) throw new IllegalStateException("initialData is not parsed yet");
return ageLimit;
}
@Override
@ -316,68 +264,22 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
public long getViewCount() throws ParsingException {
assertPageFetched();
String views = null;
try {
if (getStreamType().equals(StreamType.LIVE_STREAM)) {
return getLiveStreamWatchingCount();
} else {
return Long.parseLong(playerResponse.getObject("videoDetails").getString("viewCount"));
}
} catch (Exception e) {
views = getTextFromObject(getVideoPrimaryInfoRenderer().getObject("viewCount")
.getObject("videoViewCountRenderer").getObject("viewCount"));
} catch (Exception ignored) {}
if (views == null) {
try {
return Long.parseLong(doc.select("meta[itemprop=interactionCount]").attr(CONTENT));
} catch (Exception ignored) {
throw new ParsingException("Could not get view count", e);
}
}
}
views = playerResponse.getObject("videoDetails").getString("viewCount");
} catch (Exception ignored) {}
private long getLiveStreamWatchingCount() throws ExtractionException, IOException, JsonParserException {
// https://www.youtube.com/youtubei/v1/updated_metadata?alt=json&key=
String innerTubeKey = null, clientVersion = null;
if (playerArgs != null && !playerArgs.isEmpty()) {
innerTubeKey = playerArgs.getString("innertube_api_key");
clientVersion = playerArgs.getString("innertube_context_client_version");
} else if (!videoInfoPage.isEmpty()) {
innerTubeKey = videoInfoPage.get("innertube_api_key");
clientVersion = videoInfoPage.get("innertube_context_client_version");
if (views == null) throw new ParsingException("Could not get view count");
}
if (innerTubeKey == null || innerTubeKey.isEmpty()) {
throw new ExtractionException("Couldn't get innerTube key");
}
if (clientVersion == null || clientVersion.isEmpty()) {
throw new ExtractionException("Couldn't get innerTube client version");
}
final String metadataUrl = "https://www.youtube.com/youtubei/v1/updated_metadata?alt=json&key=" + innerTubeKey;
final byte[] dataBody = ("{\"context\":{\"client\":{\"clientName\":1,\"clientVersion\":\"" + clientVersion + "\"}}" +
",\"videoId\":\"" + getId() + "\"}").getBytes("UTF-8");
final Response response = getDownloader().execute(Request.newBuilder()
.post(metadataUrl, dataBody)
.addHeader("Content-Type", "application/json")
.build());
final JsonObject jsonObject = JsonParser.object().from(response.responseBody());
for (Object actionEntry : jsonObject.getArray("actions")) {
if (!(actionEntry instanceof JsonObject)) continue;
final JsonObject entry = (JsonObject) actionEntry;
final JsonObject updateViewershipAction = entry.getObject("updateViewershipAction", null);
if (updateViewershipAction == null) continue;
final JsonArray viewCountRuns = JsonUtils.getArray(updateViewershipAction, "viewership.videoViewCountRenderer.viewCount.runs");
if (viewCountRuns.isEmpty()) continue;
final JsonObject textObject = viewCountRuns.getObject(0);
if (!textObject.has("text")) {
throw new ExtractionException("Response don't have \"text\" element");
}
return Long.parseLong(Utils.removeNonDigitCharacters(textObject.getString("text")));
}
throw new ExtractionException("Could not find correct results in response");
return Long.parseLong(Utils.removeNonDigitCharacters(views));
}
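// Note on the method above: a localized "1,234,567 views" from the primary info renderer parses
// to 1234567, since Utils.removeNonDigitCharacters strips everything but the digits.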
@Override
@ -385,9 +287,9 @@ public class YoutubeStreamExtractor extends StreamExtractor {
assertPageFetched();
String likesString = "";
try {
Element button = doc.select("button.like-button-renderer-like-button").first();
try {
likesString = button.select("span.yt-uix-button-content").first().text();
likesString = getVideoPrimaryInfoRenderer().getObject("sentimentBar")
.getObject("sentimentBarRenderer").getString("tooltip").split("/")[0];
} catch (NullPointerException e) {
// if this kicks in, the sentiment bar tooltip has no content and therefore ratings must be disabled
if (playerResponse.getObject("videoDetails").getBoolean("allowRatings")) {
@ -408,9 +310,9 @@ public class YoutubeStreamExtractor extends StreamExtractor {
assertPageFetched();
String dislikesString = "";
try {
Element button = doc.select("button.like-button-renderer-dislike-button").first();
try {
dislikesString = button.select("span.yt-uix-button-content").first().text();
dislikesString = getVideoPrimaryInfoRenderer().getObject("sentimentBar")
.getObject("sentimentBarRenderer").getString("tooltip").split("/")[1];
} catch (NullPointerException e) {
// if this kicks in, the sentiment bar tooltip has no content and therefore ratings must be disabled
if (playerResponse.getObject("videoDetails").getBoolean("allowRatings")) {
@ -431,59 +333,52 @@ public class YoutubeStreamExtractor extends StreamExtractor {
public String getUploaderUrl() throws ParsingException {
assertPageFetched();
try {
return "https://www.youtube.com/channel/" +
playerResponse.getObject("videoDetails").getString("channelId");
} catch (Exception e) {
String uploaderUrl = null;
try {
uploaderUrl = doc.select("div[class=\"yt-user-info\"]").first().children()
.select("a").first().attr("abs:href");
} catch (Exception ignored) {}
if (uploaderUrl == null) {
throw new ParsingException("Could not get channel link", e);
}
return uploaderUrl;
}
String uploaderUrl = getUrlFromNavigationEndpoint(getVideoSecondaryInfoRenderer()
.getObject("owner").getObject("videoOwnerRenderer").getObject("navigationEndpoint"));
if (uploaderUrl != null) return uploaderUrl;
} catch (Exception ignored) {}
try {
String uploaderId = playerResponse.getObject("videoDetails").getString("channelId");
if (uploaderId != null)
return YoutubeChannelLinkHandlerFactory.getInstance().getUrl("channel/" + uploaderId);
} catch (Exception ignored) {}
throw new ParsingException("Could not get uploader url");
}
@Nonnull
@Override
public String getUploaderName() throws ParsingException {
assertPageFetched();
String uploaderName = null;
try {
return playerResponse.getObject("videoDetails").getString("author");
} catch (Exception e) {
String name = null;
uploaderName = getTextFromObject(getVideoSecondaryInfoRenderer().getObject("owner")
.getObject("videoOwnerRenderer").getObject("title"));
} catch (Exception ignored) {}
if (uploaderName == null) {
try {
name = doc.select("div.yt-user-info").first().text();
uploaderName = playerResponse.getObject("videoDetails").getString("author");
} catch (Exception ignored) {}
if (name == null) {
throw new ParsingException("Could not get uploader name");
}
return name;
if (uploaderName == null) throw new ParsingException("Could not get uploader name");
}
return uploaderName;
}
@Nonnull
@Override
public String getUploaderAvatarUrl() throws ParsingException {
assertPageFetched();
String uploaderAvatarUrl = null;
try {
uploaderAvatarUrl = doc.select("a[class*=\"yt-user-photo\"]").first()
.select("img").first()
.attr("abs:data-thumb");
} catch (Exception e) {//todo: add fallback method
String url = getVideoSecondaryInfoRenderer().getObject("owner").getObject("videoOwnerRenderer")
.getObject("thumbnail").getArray("thumbnails").getObject(0).getString("url");
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Could not get uploader avatar url", e);
}
if (uploaderAvatarUrl == null) {
throw new ParsingException("Could not get uploader avatar url");
}
return uploaderAvatarUrl;
}
@Nonnull
@ -592,13 +487,13 @@ public class YoutubeStreamExtractor extends StreamExtractor {
@Override
@Nonnull
public List<SubtitlesStream> getSubtitlesDefault() throws IOException, ExtractionException {
public List<SubtitlesStream> getSubtitlesDefault() {
return getSubtitles(MediaFormat.TTML);
}
@Override
@Nonnull
public List<SubtitlesStream> getSubtitles(final MediaFormat format) throws IOException, ExtractionException {
public List<SubtitlesStream> getSubtitles(final MediaFormat format) {
assertPageFetched();
List<SubtitlesStream> subtitles = new ArrayList<>();
for (final SubtitlesInfo subtitlesInfo : subtitlesInfos) {
@ -622,18 +517,28 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
@Override
public StreamInfoItem getNextStream() throws IOException, ExtractionException {
public StreamInfoItem getNextStream() throws ExtractionException {
assertPageFetched();
try {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final TimeAgoParser timeAgoParser = getTimeAgoParser();
Elements watch = doc.select("div[class=\"watch-sidebar-section\"]");
if (watch.size() < 1) {
return null;// prevent the snackbar notification "report error" on age-restricted videos
if (getAgeLimit() != NO_AGE_LIMIT) return null;
try {
final JsonObject firstWatchNextItem = initialData.getObject("contents")
.getObject("twoColumnWatchNextResults").getObject("secondaryResults")
.getObject("secondaryResults").getArray("results").getObject(0);
if (!firstWatchNextItem.has("compactAutoplayRenderer")) {
// there is no "next" stream
return null;
}
collector.commit(extractVideoPreviewInfo(watch.first().select("li").first(), timeAgoParser));
final JsonObject videoInfo = firstWatchNextItem.getObject("compactAutoplayRenderer")
.getArray("contents").getObject(0).getObject("compactVideoRenderer");
final TimeAgoParser timeAgoParser = getTimeAgoParser();
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
collector.commit(new YoutubeStreamInfoItemExtractor(videoInfo, timeAgoParser));
return collector.getItems().get(0);
} catch (Exception e) {
throw new ParsingException("Could not get next video", e);
@ -641,20 +546,22 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
@Override
public StreamInfoItemsCollector getRelatedStreams() throws IOException, ExtractionException {
public StreamInfoItemsCollector getRelatedStreams() throws ExtractionException {
assertPageFetched();
if (getAgeLimit() != NO_AGE_LIMIT) return null;
try {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
JsonArray results = initialData.getObject("contents").getObject("twoColumnWatchNextResults")
.getObject("secondaryResults").getObject("secondaryResults").getArray("results");
final TimeAgoParser timeAgoParser = getTimeAgoParser();
Element ul = doc.select("ul[id=\"watch-related\"]").first();
if (ul != null) {
for (Element li : ul.children()) {
// first check if we have a playlist. If so, leave them out
if (li.select("a[class*=\"content-link\"]").first() != null) {
collector.commit(extractVideoPreviewInfo(li, timeAgoParser));
}
}
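// only entries wrapping a compactVideoRenderer are plain videos; other renderers (e.g. the autoplay item or playlists) are skipped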
for (Object ul : results) {
final JsonObject videoInfo = ((JsonObject) ul).getObject("compactVideoRenderer");
if (videoInfo != null) collector.commit(new YoutubeStreamInfoItemExtractor(videoInfo, timeAgoParser));
}
return collector;
} catch (Exception e) {
@ -667,23 +574,12 @@ public class YoutubeStreamExtractor extends StreamExtractor {
*/
@Override
public String getErrorMessage() {
StringBuilder errorReason;
Element errorElement = doc.select("h1[id=\"unavailable-message\"]").first();
if (errorElement == null) {
errorReason = null;
} else {
String errorMessage = errorElement.text();
if (errorMessage == null || errorMessage.isEmpty()) {
errorReason = null;
} else {
errorReason = new StringBuilder(errorMessage);
errorReason.append(" ");
errorReason.append(doc.select("[id=\"unavailable-submessage\"]").first().text());
}
try {
return getTextFromObject(initialAjaxJson.getObject(2).getObject("playerResponse").getObject("playabilityStatus")
.getObject("errorScreen").getObject("playerErrorMessageRenderer").getObject("reason"));
} catch (ParsingException e) {
return null;
}
return errorReason != null ? errorReason.toString() : "";
}
/*//////////////////////////////////////////////////////////////////////////
@ -693,11 +589,8 @@ public class YoutubeStreamExtractor extends StreamExtractor {
private static final String FORMATS = "formats";
private static final String ADAPTIVE_FORMATS = "adaptiveFormats";
private static final String HTTPS = "https:";
private static final String CONTENT = "content";
private static final String DECRYPTION_FUNC_NAME = "decrypt";
private static final String VERIFIED_URL_PARAMS = "&has_verified=1&bpctr=9999999999";
private final static String DECRYPTION_SIGNATURE_FUNCTION_REGEX =
"([\\w$]+)\\s*=\\s*function\\((\\w+)\\)\\{\\s*\\2=\\s*\\2\\.split\\(\"\"\\)\\s*;";
private final static String DECRYPTION_SIGNATURE_FUNCTION_REGEX_2 =
@ -709,32 +602,41 @@ public class YoutubeStreamExtractor extends StreamExtractor {
private volatile String decryptionCode = "";
private String pageHtml = null;
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
final String verifiedUrl = getUrl() + VERIFIED_URL_PARAMS;
final Response response = downloader.get(verifiedUrl, getExtractorLocalization());
pageHtml = response.responseBody();
doc = YoutubeParsingHelper.parseAndCheckPage(verifiedUrl, response);
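// the "&pbj=1" parameter makes YouTube return a JSON array instead of an HTML document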
final String url = getUrl() + "&pbj=1";
initialAjaxJson = getJsonResponse(url, getExtractorLocalization());
final String playerUrl;
// Check if the video is age restricted
if (!doc.select("meta[property=\"og:restrictions:age\"]").isEmpty()) {
if (initialAjaxJson.getObject(2).getObject("response") != null) { // age-restricted videos
initialData = initialAjaxJson.getObject(2).getObject("response");
ageLimit = 18;
final EmbeddedInfo info = getEmbeddedInfo();
final String videoInfoUrl = getVideoInfoUrl(getId(), info.sts);
final String infoPageResponse = downloader.get(videoInfoUrl, getExtractorLocalization()).responseBody();
videoInfoPage.putAll(Parser.compatParseMap(infoPageResponse));
playerUrl = info.url;
isAgeRestricted = true;
} else {
final JsonObject ytPlayerConfig = getPlayerConfig();
playerArgs = getPlayerArgs(ytPlayerConfig);
playerUrl = getPlayerUrl(ytPlayerConfig);
isAgeRestricted = false;
initialData = initialAjaxJson.getObject(3).getObject("response");
ageLimit = NO_AGE_LIMIT;
playerArgs = getPlayerArgs(initialAjaxJson.getObject(2).getObject("player"));
playerUrl = getPlayerUrl(initialAjaxJson.getObject(2).getObject("player"));
}
playerResponse = getPlayerResponse();
final JsonObject playabilityStatus = playerResponse.getObject("playabilityStatus", JsonUtils.DEFAULT_EMPTY);
final String status = playabilityStatus.getString("status");
// If the status field exists and is not "OK", throw a ContentNotAvailableException with the reason.
if (status != null && !status.toLowerCase().equals("ok")) {
final String reason = playabilityStatus.getString("reason");
throw new ContentNotAvailableException("Got error: \"" + reason + "\"");
}
if (decryptionCode.isEmpty()) {
decryptionCode = loadDecryptionCode(playerUrl);
}
@ -744,23 +646,6 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
}
private JsonObject getPlayerConfig() throws ParsingException {
try {
String ytPlayerConfigRaw = Parser.matchGroup1("ytplayer.config\\s*=\\s*(\\{.*?\\});", pageHtml);
return JsonParser.object().from(ytPlayerConfigRaw);
} catch (Parser.RegexException e) {
String errorReason = getErrorMessage();
switch (errorReason) {
case "":
throw new ContentNotAvailableException("Content not available: player config empty", e);
default:
throw new ContentNotAvailableException("Content not available", e);
}
} catch (Exception e) {
throw new ParsingException("Could not parse yt player config", e);
}
}
private JsonObject getPlayerArgs(JsonObject playerConfig) throws ParsingException {
JsonObject playerArgs;
@ -910,9 +795,9 @@ public class YoutubeStreamExtractor extends StreamExtractor {
}
@Nonnull
private List<SubtitlesInfo> getAvailableSubtitlesInfo() throws SubtitlesException {
private List<SubtitlesInfo> getAvailableSubtitlesInfo() {
// If the video is age-restricted, getPlayerConfig will fail
if (isAgeRestricted) return Collections.emptyList();
if (getAgeLimit() != NO_AGE_LIMIT) return Collections.emptyList();
final JsonObject captions;
if (!playerResponse.has("captions")) {
@ -924,7 +809,7 @@ public class YoutubeStreamExtractor extends StreamExtractor {
final JsonObject renderer = captions.getObject("playerCaptionsTracklistRenderer", new JsonObject());
final JsonArray captionsArray = renderer.getArray("captionTracks", new JsonArray());
// todo: use this to apply automatic translation to a different language from a source language
final JsonArray autoCaptionsArray = renderer.getArray("translationLanguages", new JsonArray());
// final JsonArray autoCaptionsArray = renderer.getArray("translationLanguages", new JsonArray());
// This check is necessary since there may be cases where the subtitles metadata does not contain caption track info
// e.g. https://www.youtube.com/watch?v=-Vpwatutnko
@ -981,6 +866,50 @@ public class YoutubeStreamExtractor extends StreamExtractor {
// Utils
//////////////////////////////////////////////////////////////////////////*/
private JsonObject getVideoPrimaryInfoRenderer() throws ParsingException {
if (this.videoPrimaryInfoRenderer != null) return this.videoPrimaryInfoRenderer;
JsonArray contents = initialData.getObject("contents").getObject("twoColumnWatchNextResults")
.getObject("results").getObject("results").getArray("contents");
JsonObject videoPrimaryInfoRenderer = null;
for (Object content : contents) {
if (((JsonObject) content).getObject("videoPrimaryInfoRenderer") != null) {
videoPrimaryInfoRenderer = ((JsonObject) content).getObject("videoPrimaryInfoRenderer");
break;
}
}
if (videoPrimaryInfoRenderer == null) {
throw new ParsingException("Could not find videoPrimaryInfoRenderer");
}
this.videoPrimaryInfoRenderer = videoPrimaryInfoRenderer;
return videoPrimaryInfoRenderer;
}
private JsonObject getVideoSecondaryInfoRenderer() throws ParsingException {
if (this.videoSecondaryInfoRenderer != null) return this.videoSecondaryInfoRenderer;
JsonArray contents = initialData.getObject("contents").getObject("twoColumnWatchNextResults")
.getObject("results").getObject("results").getArray("contents");
JsonObject videoSecondaryInfoRenderer = null;
for (Object content : contents) {
if (((JsonObject) content).getObject("videoSecondaryInfoRenderer") != null) {
videoSecondaryInfoRenderer = ((JsonObject) content).getObject("videoSecondaryInfoRenderer");
break;
}
}
if (videoSecondaryInfoRenderer == null) {
throw new ParsingException("Could not find videoSecondaryInfoRenderer");
}
this.videoSecondaryInfoRenderer = videoSecondaryInfoRenderer;
return videoSecondaryInfoRenderer;
}
@Nonnull
private static String getVideoInfoUrl(final String id, final String sts) {
return "https://www.youtube.com/get_video_info?" + "video_id=" + id +
@ -1015,123 +944,98 @@ public class YoutubeStreamExtractor extends StreamExtractor {
urlAndItags.put(streamUrl, itagItem);
}
} catch (UnsupportedEncodingException ignored) {
}
} catch (UnsupportedEncodingException ignored) {}
}
}
return urlAndItags;
}
/**
* Provides information about links to other videos on the video page, such as related videos.
* This is encapsulated in a StreamInfoItem object, which is a subset of the fields in a full StreamInfo.
*/
private StreamInfoItemExtractor extractVideoPreviewInfo(final Element li, final TimeAgoParser timeAgoParser) {
return new YoutubeStreamInfoItemExtractor(li, timeAgoParser) {
@Override
public String getUrl() throws ParsingException {
return li.select("a.content-link").first().attr("abs:href");
}
@Override
public String getName() throws ParsingException {
//todo: check what causes the NullPointerException
return li.select("span.title").first().text();
//this page causes the NullPointerException; it can be found by searching for "tjvg":
//https://www.youtube.com/watch?v=Uqg0aEhLFAg
}
@Override
public String getUploaderName() throws ParsingException {
return li.select("span[class*=\"attribution\"").first()
.select("span").first().text();
}
@Override
public String getUploaderUrl() throws ParsingException {
return ""; // The uploader is not linked
}
@Override
public String getTextualUploadDate() throws ParsingException {
return "";
}
@Override
public String getThumbnailUrl() throws ParsingException {
Element img = li.select("img").first();
String thumbnailUrl = img.attr("abs:src");
// Sometimes YouTube sends links to gif files which somehow seem to no longer exist.
// Items with such a gif also offer a secondary image source, so we are going
// to use that if we have caught such an item.
if (thumbnailUrl.contains(".gif")) {
thumbnailUrl = img.attr("data-thumb");
@Nonnull
@Override
public List<Frameset> getFrames() throws ExtractionException {
try {
JsonObject jo = initialAjaxJson.getObject(2).getObject("player");
final String resp = jo.getObject("args").getString("player_response");
jo = JsonParser.object().from(resp);
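// the storyboard spec is a "|"-separated string: the first entry is a URL template (with $L, $N and, for multi-page levels, $M placeholders), every following entry describes one storyboard level as "#"-separated fields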
final String[] spec = jo.getObject("storyboards").getObject("playerStoryboardSpecRenderer").getString("spec").split("\\|");
final String url = spec[0];
final ArrayList<Frameset> result = new ArrayList<>(spec.length - 1);
for (int i = 1; i < spec.length; ++i) {
final String[] parts = spec[i].split("#");
if (parts.length != 8) {
continue;
}
if (thumbnailUrl.startsWith("//")) {
thumbnailUrl = HTTPS + thumbnailUrl;
final int frameWidth = Integer.parseInt(parts[0]);
final int frameHeight = Integer.parseInt(parts[1]);
final int totalCount = Integer.parseInt(parts[2]);
final int framesPerPageX = Integer.parseInt(parts[3]);
final int framesPerPageY = Integer.parseInt(parts[4]);
final String baseUrl = url.replace("$L", String.valueOf(i - 1)).replace("$N", parts[6]) + "&sigh=" + parts[7];
final List<String> urls;
if (baseUrl.contains("$M")) {
final int totalPages = (int) Math.ceil(totalCount / (double) (framesPerPageX * framesPerPageY));
urls = new ArrayList<>(totalPages);
for (int j = 0; j < totalPages; j++) {
urls.add(baseUrl.replace("$M", String.valueOf(j)));
}
} else {
urls = Collections.singletonList(baseUrl);
}
return thumbnailUrl;
result.add(new Frameset(
urls,
frameWidth,
frameHeight,
totalCount,
framesPerPageX,
framesPerPageY
));
}
};
result.trimToSize();
return result;
} catch (Exception e) {
throw new ExtractionException(e);
}
}
@Nonnull
@Override
public List<Frameset> getFrames() throws ExtractionException {
try {
final String script = doc.select("#player-api").first().siblingElements().select("script").html();
int p = script.indexOf("ytplayer.config");
if (p == -1) {
return Collections.emptyList();
}
p = script.indexOf('{', p);
int e = script.indexOf("ytplayer.load", p);
if (e == -1) {
return Collections.emptyList();
}
JsonObject jo = JsonParser.object().from(script.substring(p, e - 1));
final String resp = jo.getObject("args").getString("player_response");
jo = JsonParser.object().from(resp);
final String[] spec = jo.getObject("storyboards").getObject("playerStoryboardSpecRenderer").getString("spec").split("\\|");
final String url = spec[0];
final ArrayList<Frameset> result = new ArrayList<>(spec.length - 1);
for (int i = 1; i < spec.length; ++i) {
final String[] parts = spec[i].split("#");
if (parts.length != 8) {
continue;
}
final int frameWidth = Integer.parseInt(parts[0]);
final int frameHeight = Integer.parseInt(parts[1]);
final int totalCount = Integer.parseInt(parts[2]);
final int framesPerPageX = Integer.parseInt(parts[3]);
final int framesPerPageY = Integer.parseInt(parts[4]);
final String baseUrl = url.replace("$L", String.valueOf(i - 1)).replace("$N", parts[6]) + "&sigh=" + parts[7];
final List<String> urls;
if (baseUrl.contains("$M")) {
final int totalPages = (int) Math.ceil(totalCount / (double) (framesPerPageX * framesPerPageY));
urls = new ArrayList<>(totalPages);
for (int j = 0; j < totalPages; j++) {
urls.add(baseUrl.replace("$M", String.valueOf(j)));
}
} else {
urls = Collections.singletonList(baseUrl);
}
result.add(new Frameset(
urls,
frameWidth,
frameHeight,
totalCount,
framesPerPageX,
framesPerPageY
));
}
result.trimToSize();
return result;
} catch (Exception e) {
throw new ExtractionException(e);
}
}
@Nonnull
@Override
public String getHost() {
return "";
}
@Nonnull
@Override
public String getPrivacy() {
return "";
}
@Nonnull
@Override
public String getCategory() {
return "";
}
@Nonnull
@Override
public String getLicence() {
return "";
}
@Override
public Locale getLanguageInfo() {
return null;
}
@Nonnull
@Override
public List<String> getTags() {
return new ArrayList<>();
}
@Nonnull
@Override
public String getSupportInfo() {
return "";
}
}

View File

@ -1,11 +1,12 @@
package org.schabi.newpipe.extractor.services.youtube.extractors;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeStreamLinkHandlerFactory;
import org.schabi.newpipe.extractor.stream.StreamInfoItemExtractor;
import org.schabi.newpipe.extractor.stream.StreamType;
import org.schabi.newpipe.extractor.utils.Utils;
@ -15,6 +16,8 @@ import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.*;
/*
* Copyright (C) Christian Schabesberger 2016 <chris.schabesberger@mailbox.org>
* YoutubeStreamInfoItemExtractor.java is part of NewPipe.
@ -35,111 +38,149 @@ import java.util.Date;
public class YoutubeStreamInfoItemExtractor implements StreamInfoItemExtractor {
private final Element item;
private JsonObject videoInfo;
private final TimeAgoParser timeAgoParser;
private String cachedUploadDate;
private StreamType cachedStreamType;
/**
* Creates an extractor of StreamInfoItems from a YouTube page.
* @param item The page element
*
* @param videoInfoItem The JSON page element
* @param timeAgoParser A parser of the textual dates or {@code null}.
*/
public YoutubeStreamInfoItemExtractor(Element item, @Nullable TimeAgoParser timeAgoParser) {
this.item = item;
public YoutubeStreamInfoItemExtractor(JsonObject videoInfoItem, @Nullable TimeAgoParser timeAgoParser) {
this.videoInfo = videoInfoItem;
this.timeAgoParser = timeAgoParser;
}
@Override
public StreamType getStreamType() throws ParsingException {
if (isLiveStream(item)) {
return StreamType.LIVE_STREAM;
} else {
return StreamType.VIDEO_STREAM;
public StreamType getStreamType() {
if (cachedStreamType != null) {
return cachedStreamType;
}
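// an item is treated as a live stream if it carries a "LIVE NOW" metadata badge or a thumbnail overlay with the "LIVE" style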
try {
JsonArray badges = videoInfo.getArray("badges");
for (Object badge : badges) {
if (((JsonObject) badge).getObject("metadataBadgeRenderer").getString("label").equals("LIVE NOW")) {
return cachedStreamType = StreamType.LIVE_STREAM;
}
}
} catch (Exception ignored) {}
try {
final String style = videoInfo.getArray("thumbnailOverlays").getObject(0)
.getObject("thumbnailOverlayTimeStatusRenderer").getString("style");
if (style.equalsIgnoreCase("LIVE")) {
return cachedStreamType = StreamType.LIVE_STREAM;
}
} catch (Exception ignored) {}
return cachedStreamType = StreamType.VIDEO_STREAM;
}
@Override
public boolean isAd() throws ParsingException {
return !item.select("span[class*=\"icon-not-available\"]").isEmpty()
|| !item.select("span[class*=\"yt-badge-ad\"]").isEmpty()
|| isPremiumVideo();
}
private boolean isPremiumVideo() {
Element premiumSpan = item.select("span[class=\"standalone-collection-badge-renderer-red-text\"]").first();
if(premiumSpan == null) return false;
// if this span has text, it most likely says "Free Video", so we can play this
if(premiumSpan.hasText()) return false;
return true;
return isPremium() || getName().equals("[Private video]") || getName().equals("[Deleted video]");
}
@Override
public String getUrl() throws ParsingException {
try {
Element el = item.select("div[class*=\"yt-lockup-video\"]").first();
Element dl = el.select("h3").first().select("a").first();
return dl.attr("abs:href");
String videoId = videoInfo.getString("videoId");
return YoutubeStreamLinkHandlerFactory.getInstance().getUrl(videoId);
} catch (Exception e) {
throw new ParsingException("Could not get web page url for the video", e);
throw new ParsingException("Could not get url", e);
}
}
@Override
public String getName() throws ParsingException {
try {
Element el = item.select("div[class*=\"yt-lockup-video\"]").first();
Element dl = el.select("h3").first().select("a").first();
return dl.text();
} catch (Exception e) {
throw new ParsingException("Could not get title", e);
}
String name = getTextFromObject(videoInfo.getObject("title"));
if (name != null && !name.isEmpty()) return name;
throw new ParsingException("Could not get name");
}
@Override
public long getDuration() throws ParsingException {
try {
if (getStreamType() == StreamType.LIVE_STREAM) return -1;
final Element duration = item.select("span[class*=\"video-time\"]").first();
// apparently on YouTube, the video-time element will not show up if the video has a duration of 00:00
// see: https://www.youtube.com/results?sp=EgIQAVAU&q=asdfgf
return duration == null ? 0 : YoutubeParsingHelper.parseDurationString(duration.text());
} catch (Exception e) {
throw new ParsingException("Could not get Duration: " + getUrl(), e);
if (getStreamType() == StreamType.LIVE_STREAM || isPremiere()) {
return -1;
}
String duration = null;
try {
duration = getTextFromObject(videoInfo.getObject("lengthText"));
} catch (Exception ignored) {}
if (duration == null) {
try {
for (Object thumbnailOverlay : videoInfo.getArray("thumbnailOverlays")) {
if (((JsonObject) thumbnailOverlay).getObject("thumbnailOverlayTimeStatusRenderer") != null) {
duration = getTextFromObject(((JsonObject) thumbnailOverlay)
.getObject("thumbnailOverlayTimeStatusRenderer").getObject("text"));
}
}
} catch (Exception ignored) {}
if (duration == null) throw new ParsingException("Could not get duration");
}
return YoutubeParsingHelper.parseDurationString(duration);
}
@Override
public String getUploaderName() throws ParsingException {
String name = null;
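// the uploader name may be present in longBylineText, ownerText or shortBylineText depending on the renderer, so try them in that order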
try {
return item.select("div[class=\"yt-lockup-byline\"]").first()
.select("a").first()
.text();
} catch (Exception e) {
throw new ParsingException("Could not get uploader", e);
name = getTextFromObject(videoInfo.getObject("longBylineText"));
} catch (Exception ignored) {}
if (name == null) {
try {
name = getTextFromObject(videoInfo.getObject("ownerText"));
} catch (Exception ignored) {}
if (name == null) {
try {
name = getTextFromObject(videoInfo.getObject("shortBylineText"));
} catch (Exception ignored) {}
if (name == null) throw new ParsingException("Could not get uploader name");
}
}
return name;
}
@Override
public String getUploaderUrl() throws ParsingException {
// this url is not always in the form "/channel/..."
// sometimes YouTube provides urls in the form "/user/..."
try {
try {
return item.select("div[class=\"yt-lockup-byline\"]").first()
.select("a").first()
.attr("abs:href");
} catch (Exception e){}
String url = null;
// try this if the first didn't work
return item.select("span[class=\"title\"")
.text().split(" - ")[0];
} catch (Exception e) {
System.out.println(item.html());
throw new ParsingException("Could not get uploader url", e);
try {
url = getUrlFromNavigationEndpoint(videoInfo.getObject("longBylineText")
.getArray("runs").getObject(0).getObject("navigationEndpoint"));
} catch (Exception ignored) {}
if (url == null) {
try {
url = getUrlFromNavigationEndpoint(videoInfo.getObject("ownerText")
.getArray("runs").getObject(0).getObject("navigationEndpoint"));
} catch (Exception ignored) {}
if (url == null) {
try {
url = getUrlFromNavigationEndpoint(videoInfo.getObject("shortBylineText")
.getArray("runs").getObject(0).getObject("navigationEndpoint"));
} catch (Exception ignored) {}
if (url == null) throw new ParsingException("Could not get uploader url");
}
}
return url;
}
@Nullable
@ -149,29 +190,16 @@ public class YoutubeStreamInfoItemExtractor implements StreamInfoItemExtractor {
return null;
}
if (cachedUploadDate != null) {
return cachedUploadDate;
if (isPremiere()) {
final Date date = getDateFromPremiere().getTime();
return new SimpleDateFormat("yyyy-MM-dd HH:mm").format(date);
}
try {
if (isVideoReminder()) {
final Calendar calendar = getDateFromReminder();
if (calendar != null) {
return cachedUploadDate = new SimpleDateFormat("yyyy-MM-dd HH:mm")
.format(calendar.getTime());
}
}
Element meta = item.select("div[class=\"yt-lockup-meta\"]").first();
if (meta == null) return "";
final Elements li = meta.select("li");
if (li.isEmpty()) return "";
return cachedUploadDate = li.first().text();
return getTextFromObject(videoInfo.getObject("publishedTimeText"));
} catch (Exception e) {
throw new ParsingException("Could not get upload date", e);
// upload date is not always available, e.g. in playlists
return null;
}
}
@ -182,115 +210,89 @@ public class YoutubeStreamInfoItemExtractor implements StreamInfoItemExtractor {
return null;
}
if (isVideoReminder()) {
return new DateWrapper(getDateFromReminder());
if (isPremiere()) {
return new DateWrapper(getDateFromPremiere());
}
String textualUploadDate = getTextualUploadDate();
final String textualUploadDate = getTextualUploadDate();
if (timeAgoParser != null && textualUploadDate != null && !textualUploadDate.isEmpty()) {
return timeAgoParser.parse(textualUploadDate);
} else {
return null;
try {
return timeAgoParser.parse(textualUploadDate);
} catch (ParsingException e) {
throw new ParsingException("Could not get upload date", e);
}
}
return null;
}
@Override
public long getViewCount() throws ParsingException {
String input;
final Element spanViewCount = item.select("span.view-count").first();
if (spanViewCount != null) {
input = spanViewCount.text();
} else if (getStreamType().equals(StreamType.LIVE_STREAM)) {
Element meta = item.select("ul.yt-lockup-meta-info").first();
if (meta == null) return 0;
final Elements li = meta.select("li");
if (li.isEmpty()) return 0;
input = li.first().text();
} else {
try {
Element meta = item.select("div.yt-lockup-meta").first();
if (meta == null) return -1;
// This case can happen if google releases a special video
if (meta.select("li").size() < 2) return -1;
input = meta.select("li").get(1).text();
} catch (IndexOutOfBoundsException e) {
throw new ParsingException("Could not parse yt-lockup-meta although available: " + getUrl(), e);
}
}
if (input == null) {
throw new ParsingException("Input is null");
}
try {
return Long.parseLong(Utils.removeNonDigitCharacters(input));
} catch (NumberFormatException e) {
// if this happens the video probably has no views
if (!input.isEmpty()){
return 0;
if (videoInfo.getObject("topStandaloneBadge") != null || isPremium()) {
return -1;
}
throw new ParsingException("Could not handle input: " + input, e);
final JsonObject viewCountObject = videoInfo.getObject("viewCountText");
if (viewCountObject == null) {
// This object is null when a video has its views hidden.
return -1;
}
final String viewCount = getTextFromObject(viewCountObject);
if (viewCount.toLowerCase().contains("no views")) {
return 0;
} else if (viewCount.toLowerCase().contains("recommended")) {
return -1;
}
return Long.parseLong(Utils.removeNonDigitCharacters(viewCount));
} catch (Exception e) {
throw new ParsingException("Could not get view count", e);
}
}
@Override
public String getThumbnailUrl() throws ParsingException {
try {
String url;
Element te = item.select("div[class=\"yt-thumb video-thumb\"]").first()
.select("img").first();
url = te.attr("abs:src");
// Sometimes YouTube sends links to gif files which somehow seem to no longer exist.
// Items with such a gif also offer a secondary image source, so we are going
// to use that if we have caught such an item.
if (url.contains(".gif")) {
url = te.attr("abs:data-thumb");
}
return url;
// TODO: Don't simply get the first item, but look at all thumbnails and their resolution
String url = videoInfo.getObject("thumbnail").getArray("thumbnails")
.getObject(0).getString("url");
return fixThumbnailUrl(url);
} catch (Exception e) {
throw new ParsingException("Could not get thumbnail url", e);
}
}
private boolean isVideoReminder() {
return !item.select("span.yt-uix-livereminder").isEmpty();
}
private Calendar getDateFromReminder() throws ParsingException {
final Element timeFuture = item.select("span.yt-badge.localized-date").first();
if (timeFuture == null) {
throw new ParsingException("Span timeFuture is null");
}
final String timestamp = timeFuture.attr("data-timestamp");
if (!timestamp.isEmpty()) {
try {
final Calendar calendar = Calendar.getInstance();
calendar.setTime(new Date(Long.parseLong(timestamp) * 1000L));
return calendar;
} catch (Exception e) {
throw new ParsingException("Could not parse = \"" + timestamp + "\"");
private boolean isPremium() {
try {
JsonArray badges = videoInfo.getArray("badges");
for (Object badge : badges) {
if (((JsonObject) badge).getObject("metadataBadgeRenderer").getString("label").equals("Premium")) {
return true;
}
}
} catch (Exception ignored) {
}
throw new ParsingException("Could not parse date from reminder element: \"" + timeFuture + "\"");
return false;
}
/**
* Generic method that checks if the element contains any clues that it's a livestream item
*/
protected static boolean isLiveStream(Element item) {
return !item.select("span[class*=\"yt-badge-live\"]").isEmpty()
|| !item.select("span[class*=\"video-time-overlay-live\"]").isEmpty();
private boolean isPremiere() {
return videoInfo.has("upcomingEventData");
}
private Calendar getDateFromPremiere() throws ParsingException {
final JsonObject upcomingEventData = videoInfo.getObject("upcomingEventData");
final String startTime = upcomingEventData.getString("startTime");
try {
final long startTimeTimestamp = Long.parseLong(startTime);
final Calendar calendar = Calendar.getInstance();
calendar.setTime(new Date(startTimeTimestamp * 1000L));
return calendar;
} catch (Exception e) {
throw new ParsingException("Could not parse date from premiere: \"" + startTime + "\"");
}
}
}

View File

@ -3,9 +3,9 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.suggestion.SuggestionExtractor;
@ -57,12 +57,12 @@ public class YoutubeSuggestionExtractor extends SuggestionExtractor {
String response = dl.get(url, getExtractorLocalization()).responseBody();
// trim JSONP part "JP(...)"
response = response.substring(3, response.length()-1);
response = response.substring(3, response.length() - 1);
try {
JsonArray collection = JsonParser.array().from(response).getArray(1, new JsonArray());
for (Object suggestion : collection) {
if (!(suggestion instanceof JsonArray)) continue;
String suggestionStr = ((JsonArray)suggestion).getString(0);
String suggestionStr = ((JsonArray) suggestion).getString(0);
if (suggestionStr == null) continue;
suggestions.add(suggestionStr);
}

View File

@ -20,27 +20,28 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
* along with NewPipe. If not, see <http://www.gnu.org/licenses/>.
*/
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.kiosk.KioskExtractor;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
import org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
public class YoutubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
import javax.annotation.Nonnull;
private Document doc;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getJsonResponse;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.getTextFromObject;
public class YoutubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
private JsonObject initialData;
public YoutubeTrendingExtractor(StreamingService service,
ListLinkHandler linkHandler,
@ -50,11 +51,12 @@ public class YoutubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
@Override
public void onFetchPage(@Nonnull Downloader downloader) throws IOException, ExtractionException {
final String url = getUrl() +
"?gl=" + getExtractorContentCountry().getCountryCode();
final String url = getUrl() + "?pbj=1&gl="
+ getExtractorContentCountry().getCountryCode();
final Response response = downloader.get(url, getExtractorLocalization());
doc = YoutubeParsingHelper.parseAndCheckPage(url, response);
final JsonArray ajaxJson = getJsonResponse(url, getExtractorLocalization());
initialData = ajaxJson.getObject(1).getObject("response");
}
@Override
@ -70,99 +72,39 @@ public class YoutubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
@Nonnull
@Override
public String getName() throws ParsingException {
String name;
try {
Element a = doc.select("a[href*=\"/feed/trending\"]").first();
Element span = a.select("span[class*=\"display-name\"]").first();
Element nameSpan = span.select("span").first();
return nameSpan.text();
name = getTextFromObject(initialData.getObject("header").getObject("feedTabbedHeaderRenderer").getObject("title"));
} catch (Exception e) {
throw new ParsingException("Could not get Trending name", e);
}
if (name != null && !name.isEmpty()) {
return name;
}
throw new ParsingException("Could not get Trending name");
}
@Nonnull
@Override
public InfoItemsPage<StreamInfoItem> getInitialPage() throws ParsingException {
public InfoItemsPage<StreamInfoItem> getInitialPage() {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
Elements uls = doc.select("ul[class*=\"expanded-shelf-content-list\"]");
final TimeAgoParser timeAgoParser = getTimeAgoParser();
JsonArray itemSectionRenderers = initialData.getObject("contents").getObject("twoColumnBrowseResultsRenderer")
.getArray("tabs").getObject(0).getObject("tabRenderer").getObject("content")
.getObject("sectionListRenderer").getArray("contents");
for(Element ul : uls) {
for(final Element li : ul.children()) {
final Element el = li.select("div[class*=\"yt-lockup-dismissable\"]").first();
collector.commit(new YoutubeStreamInfoItemExtractor(li, timeAgoParser) {
@Override
public String getUrl() throws ParsingException {
try {
Element dl = el.select("h3").first().select("a").first();
return dl.attr("abs:href");
} catch (Exception e) {
throw new ParsingException("Could not get web page url for the video", e);
}
}
@Override
public String getName() throws ParsingException {
try {
Element dl = el.select("h3").first().select("a").first();
return dl.text();
} catch (Exception e) {
throw new ParsingException("Could not get web page url for the video", e);
}
}
@Override
public String getUploaderUrl() throws ParsingException {
try {
String link = getUploaderLink().attr("abs:href");
if (link.isEmpty()) {
throw new IllegalArgumentException("is empty");
}
return link;
} catch (Exception e) {
throw new ParsingException("Could not get Uploader name");
}
}
private Element getUploaderLink() {
// this url is not always in the form "/channel/..."
// sometimes YouTube provides urls in the form "/user/..."
Element uploaderEl = el.select("div[class*=\"yt-lockup-byline \"]").first();
return uploaderEl.select("a").first();
}
@Override
public String getUploaderName() throws ParsingException {
try {
return getUploaderLink().text();
} catch (Exception e) {
throw new ParsingException("Could not get Uploader name");
}
}
@Override
public String getThumbnailUrl() throws ParsingException {
try {
String url;
Element te = li.select("span[class=\"yt-thumb-simple\"]").first()
.select("img").first();
url = te.attr("abs:src");
// Sometimes YouTube sends links to gif files which somehow seem to no longer exist.
// Items with such a gif also offer a secondary image source, so we are going
// to use that if we have caught such an item.
if (url.contains(".gif")) {
url = te.attr("abs:data-thumb");
}
return url;
} catch (Exception e) {
throw new ParsingException("Could not get thumbnail url", e);
}
}
});
for (Object itemSectionRenderer : itemSectionRenderers) {
JsonObject expandedShelfContentsRenderer = ((JsonObject) itemSectionRenderer).getObject("itemSectionRenderer")
.getArray("contents").getObject(0).getObject("shelfRenderer").getObject("content")
.getObject("expandedShelfContentsRenderer");
if (expandedShelfContentsRenderer != null) {
for (Object ul : expandedShelfContentsRenderer.getArray("items")) {
final JsonObject videoInfo = ((JsonObject) ul).getObject("videoRenderer");
collector.commit(new YoutubeStreamInfoItemExtractor(videoInfo, timeAgoParser));
}
}
}
return new InfoItemsPage<>(collector, getNextPageUrl());
}
}

View File

@ -35,6 +35,14 @@ public class YoutubeChannelLinkHandlerFactory extends ListLinkHandlerFactory {
return instance;
}
/**
* Returns the URL to a channel from its ID.
*
* @param id the channel ID, including its prefix, e.g. 'channel/'
* @param contentFilters not used when building the URL
* @param searchFilter not used when building the URL
* @return the URL to the channel
*/
@Override
public String getUrl(String id, List<String> contentFilters, String searchFilter) {
return "https://www.youtube.com/" + id;

View File

@ -1,7 +1,10 @@
package org.schabi.newpipe.extractor.services.youtube.linkHandler;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.BASE_YOUTUBE_INTENT_URL;
import org.schabi.newpipe.extractor.exceptions.FoundAdException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandler;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import java.util.List;
@ -14,6 +17,15 @@ public class YoutubeCommentsLinkHandlerFactory extends ListLinkHandlerFactory {
return instance;
}
@Override
public ListLinkHandler fromUrl(String url) throws ParsingException {
if (url.startsWith(BASE_YOUTUBE_INTENT_URL)){
return super.fromUrl(url, BASE_YOUTUBE_INTENT_URL);
} else {
return super.fromUrl(url);
}
}
@Override
public String getUrl(String id) {
return "https://m.youtube.com/watch?v=" + id;

View File

@ -1,17 +1,32 @@
package org.schabi.newpipe.extractor.services.youtube.linkHandler;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ContentNotAvailableException;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Utils;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.net.URLDecoder;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.*;
import static org.schabi.newpipe.extractor.NewPipe.getDownloader;
import static org.schabi.newpipe.extractor.utils.Utils.HTTP;
import static org.schabi.newpipe.extractor.utils.Utils.HTTPS;
/*
* Created by Christian Schabesberger on 02.03.16.
@ -38,6 +53,15 @@ public class YoutubeParsingHelper {
private YoutubeParsingHelper() {
}
/**
* The official YouTube app supports intents in this format, where the part after the ':' is the video ID.
* Accordingly, other apps also share streams in this format.
*/
public final static String BASE_YOUTUBE_INTENT_URL = "vnd.youtube";
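// e.g. "vnd.youtube:dQw4w9WgXcQ"; handled by the fromUrl() overrides of the stream and comments link handler factories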
private static final String HARDCODED_CLIENT_VERSION = "2.20200214.04.00";
private static String clientVersion;
private static final String FEED_BASE_CHANNEL_ID = "https://www.youtube.com/feeds/videos.xml?channel_id=";
private static final String FEED_BASE_USER = "https://www.youtube.com/feeds/videos.xml?user=";
@ -143,4 +167,245 @@ public class YoutubeParsingHelper {
uploadDate.setTime(date);
return uploadDate;
}
public static JsonObject getInitialData(String html) throws ParsingException {
try {
String initialData = Parser.matchGroup1("window\\[\"ytInitialData\"\\]\\s*=\\s*(\\{.*?\\});", html);
return JsonParser.object().from(initialData);
} catch (JsonParserException | Parser.RegexException e) {
throw new ParsingException("Could not get ytInitialData", e);
}
}
public static boolean isHardcodedClientVersionValid() throws IOException, ExtractionException {
final String url = "https://www.youtube.com/results?search_query=test&pbj=1";
Map<String, List<String>> headers = new HashMap<>();
headers.put("X-YouTube-Client-Name", Collections.singletonList("1"));
headers.put("X-YouTube-Client-Version",
Collections.singletonList(HARDCODED_CLIENT_VERSION));
final String response = getDownloader().get(url, headers).responseBody();
return response.length() > 50; // ensure that we got a valid response
}
/**
* Get the client version from a YouTube page.
*
* @return the client version as a string
* @throws IOException if the page needed for extraction cannot be loaded
* @throws ExtractionException if the client version cannot be extracted
*/
public static String getClientVersion() throws IOException, ExtractionException {
if (clientVersion != null && !clientVersion.isEmpty()) return clientVersion;
if (isHardcodedClientVersionValid()) {
clientVersion = HARDCODED_CLIENT_VERSION;
return clientVersion;
}
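// otherwise load a search results page and try, in order: the "cver" value from the CSI service tracking params, the INNERTUBE_CONTEXT_CLIENT_VERSION patterns in the page source and, as a last resort, the shortened ECATCHER "client.version"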
final String url = "https://www.youtube.com/results?search_query=test";
final String html = getDownloader().get(url).responseBody();
JsonObject initialData = getInitialData(html);
JsonArray serviceTrackingParams = initialData.getObject("responseContext").getArray("serviceTrackingParams");
String shortClientVersion = null;
// try to get version from initial data first
for (Object service : serviceTrackingParams) {
JsonObject s = (JsonObject) service;
if (s.getString("service").equals("CSI")) {
JsonArray params = s.getArray("params");
for (Object param : params) {
JsonObject p = (JsonObject) param;
String key = p.getString("key");
if (key != null && key.equals("cver")) {
clientVersion = p.getString("value");
return clientVersion;
}
}
} else if (s.getString("service").equals("ECATCHER")) {
// fallback to get a shortened client version which does not contain the last two digits
JsonArray params = s.getArray("params");
for (Object param : params) {
JsonObject p = (JsonObject) param;
String key = p.getString("key");
if (key != null && key.equals("client.version")) {
shortClientVersion = p.getString("value");
}
}
}
}
String contextClientVersion;
String[] patterns = {
"INNERTUBE_CONTEXT_CLIENT_VERSION\":\"([0-9\\.]+?)\"",
"innertube_context_client_version\":\"([0-9\\.]+?)\"",
"client.version=([0-9\\.]+)"
};
for (String pattern : patterns) {
try {
contextClientVersion = Parser.matchGroup1(pattern, html);
if (contextClientVersion != null && !contextClientVersion.isEmpty()) {
clientVersion = contextClientVersion;
return clientVersion;
}
} catch (Exception ignored) {
}
}
if (shortClientVersion != null) {
clientVersion = shortClientVersion;
return clientVersion;
}
throw new ParsingException("Could not get client version");
}
public static String getUrlFromNavigationEndpoint(JsonObject navigationEndpoint) throws ParsingException {
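// a navigationEndpoint is either a urlEndpoint (external or /redirect links), a browseEndpoint (channels and other browse pages) or a watchEndpoint (videos, optionally with a playlist id and a start time)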
if (navigationEndpoint.getObject("urlEndpoint") != null) {
String internUrl = navigationEndpoint.getObject("urlEndpoint").getString("url");
if (internUrl.startsWith("/redirect?")) {
// q parameter can be the first parameter
internUrl = internUrl.substring(10);
String[] params = internUrl.split("&");
for (String param : params) {
if (param.split("=")[0].equals("q")) {
String url;
try {
url = URLDecoder.decode(param.split("=")[1], "UTF-8");
} catch (UnsupportedEncodingException e) {
return null;
}
return url;
}
}
} else if (internUrl.startsWith("http")) {
return internUrl;
}
} else if (navigationEndpoint.getObject("browseEndpoint") != null) {
final JsonObject browseEndpoint = navigationEndpoint.getObject("browseEndpoint");
final String canonicalBaseUrl = browseEndpoint.getString("canonicalBaseUrl");
final String browseId = browseEndpoint.getString("browseId");
// All channel ids are prefixed with UC
if (browseId != null && browseId.startsWith("UC")) {
return "https://www.youtube.com/channel/" + browseId;
}
if (canonicalBaseUrl != null && !canonicalBaseUrl.isEmpty()) {
return "https://www.youtube.com" + canonicalBaseUrl;
}
throw new ParsingException("canonicalBaseUrl is null and browseId is not a channel (\"" + browseEndpoint + "\")");
} else if (navigationEndpoint.getObject("watchEndpoint") != null) {
StringBuilder url = new StringBuilder();
url.append("https://www.youtube.com/watch?v=").append(navigationEndpoint.getObject("watchEndpoint").getString("videoId"));
if (navigationEndpoint.getObject("watchEndpoint").has("playlistId"))
url.append("&amp;list=").append(navigationEndpoint.getObject("watchEndpoint").getString("playlistId"));
if (navigationEndpoint.getObject("watchEndpoint").has("startTimeSeconds"))
url.append("&amp;t=").append(navigationEndpoint.getObject("watchEndpoint").getInt("startTimeSeconds"));
return url.toString();
}
return null;
}
public static String getTextFromObject(JsonObject textObject, boolean html) throws ParsingException {
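// a text object either holds a plain "simpleText" string or a list of "runs", each of which may link somewhere via a navigationEndpoint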
if (textObject.has("simpleText")) return textObject.getString("simpleText");
StringBuilder textBuilder = new StringBuilder();
for (Object textPart : textObject.getArray("runs")) {
String text = ((JsonObject) textPart).getString("text");
if (html && ((JsonObject) textPart).getObject("navigationEndpoint") != null) {
String url = getUrlFromNavigationEndpoint(((JsonObject) textPart).getObject("navigationEndpoint"));
if (url != null && !url.isEmpty()) {
textBuilder.append("<a href=\"").append(url).append("\">").append(text).append("</a>");
continue;
}
}
textBuilder.append(text);
}
String text = textBuilder.toString();
if (html) {
text = text.replaceAll("\\n", "<br>");
text = text.replaceAll(" ", " &nbsp;");
}
return text;
}
public static String getTextFromObject(JsonObject textObject) throws ParsingException {
return getTextFromObject(textObject, false);
}
public static String fixThumbnailUrl(String thumbnailUrl) {
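// e.g. "//i.ytimg.com/vi/videoId/hqdefault.jpg" -> "https://i.ytimg.com/vi/videoId/hqdefault.jpg"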
if (thumbnailUrl.startsWith("//")) {
thumbnailUrl = thumbnailUrl.substring(2);
}
if (thumbnailUrl.startsWith(HTTP)) {
thumbnailUrl = Utils.replaceHttpWithHttps(thumbnailUrl);
} else if (!thumbnailUrl.startsWith(HTTPS)) {
thumbnailUrl = "https://" + thumbnailUrl;
}
return thumbnailUrl;
}
public static JsonArray getJsonResponse(String url, Localization localization) throws IOException, ExtractionException {
Map<String, List<String>> headers = new HashMap<>();
headers.put("X-YouTube-Client-Name", Collections.singletonList("1"));
headers.put("X-YouTube-Client-Version", Collections.singletonList(getClientVersion()));
final Response response = getDownloader().get(url, headers, localization);
if (response.responseCode() == 404) {
throw new ContentNotAvailableException("Not found" +
" (\"" + response.responseCode() + " " + response.responseMessage() + "\")");
}
final String responseBody = response.responseBody();
if (responseBody.length() < 50) { // ensure that we got a valid response
throw new ParsingException("JSON response is too short");
}
// Check if the request was redirected to the error page.
final URL latestUrl = new URL(response.latestUrl());
if (latestUrl.getHost().equalsIgnoreCase("www.youtube.com")) {
final String path = latestUrl.getPath();
if (path.equalsIgnoreCase("/oops") || path.equalsIgnoreCase("/error")) {
throw new ContentNotAvailableException("Content unavailable");
}
}
final String responseContentType = response.getHeader("Content-Type");
if (responseContentType != null && responseContentType.toLowerCase().contains("text/html")) {
throw new ParsingException("Got HTML document, expected JSON response" +
" (latest url was: \"" + response.latestUrl() + "\")");
}
try {
return JsonParser.array().from(responseBody);
} catch (JsonParserException e) {
throw new ParsingException("Could not parse JSON", e);
}
}
/**
* Shared alert detection function; multiple endpoints return errors with a similar structure.
* <p>
* Will check if the object has an alert of the type "ERROR".
*
* @param initialData the object which will be checked if an alert is present
* @throws ContentNotAvailableException if an alert is detected
*/
public static void defaultAlertsCheck(JsonObject initialData) throws ContentNotAvailableException {
final JsonArray alerts = initialData.getArray("alerts");
if (alerts != null && !alerts.isEmpty()) {
final JsonObject alertRenderer = alerts.getObject(0).getObject("alertRenderer");
final String alertText = alertRenderer.getObject("text").getString("simpleText");
final String alertType = alertRenderer.getString("type");
if (alertType.equalsIgnoreCase("ERROR")) {
throw new ContentNotAvailableException("Got error: \"" + alertText + "\"");
}
}
}
}

View File

@ -31,7 +31,7 @@ public class YoutubePlaylistLinkHandlerFactory extends ListLinkHandlerFactory {
}
String path = urlObj.getPath();
if (!path.equals("/watch" ) && !path.equals("/playlist")) {
if (!path.equals("/watch") && !path.equals("/playlist")) {
throw new ParsingException("the url given is neither a video nor a playlist URL");
}

View File

@ -24,13 +24,13 @@ public class YoutubeSearchQueryHandlerFactory extends SearchQueryHandlerFactory
public String getUrl(String searchString, List<String> contentFilters, String sortFilter) throws ParsingException {
try {
final String url = "https://www.youtube.com/results"
+ "?q=" + URLEncoder.encode(searchString, CHARSET_UTF_8);
+ "?search_query=" + URLEncoder.encode(searchString, CHARSET_UTF_8);
if(contentFilters.size() > 0) {
if (contentFilters.size() > 0) {
switch (contentFilters.get(0)) {
case VIDEOS: return url + "&sp=EgIQAVAU";
case CHANNELS: return url + "&sp=EgIQAlAU";
case PLAYLISTS: return url + "&sp=EgIQA1AU";
case VIDEOS: return url + "&sp=EgIQAQ%253D%253D";
case CHANNELS: return url + "&sp=EgIQAg%253D%253D";
case PLAYLISTS: return url + "&sp=EgIQAw%253D%253D";
case ALL:
default:
}
@ -44,7 +44,7 @@ public class YoutubeSearchQueryHandlerFactory extends SearchQueryHandlerFactory
@Override
public String[] getAvailableContentFilter() {
return new String[] {
return new String[]{
ALL,
VIDEOS,
CHANNELS,

View File

@ -1,7 +1,10 @@
package org.schabi.newpipe.extractor.services.youtube.linkHandler;
import static org.schabi.newpipe.extractor.services.youtube.linkHandler.YoutubeParsingHelper.BASE_YOUTUBE_INTENT_URL;
import org.schabi.newpipe.extractor.exceptions.FoundAdException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandler;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.utils.Utils;
@ -49,6 +52,15 @@ public class YoutubeStreamLinkHandlerFactory extends LinkHandlerFactory {
return id;
}
@Override
public LinkHandler fromUrl(String url) throws ParsingException {
if (url.startsWith(BASE_YOUTUBE_INTENT_URL)){
return super.fromUrl(url, BASE_YOUTUBE_INTENT_URL);
} else {
return super.fromUrl(url);
}
}
@Override
public String getUrl(String id) {
return "https://www.youtube.com/watch?v=" + id;
@ -190,7 +202,12 @@ public class YoutubeStreamLinkHandlerFactory extends LinkHandlerFactory {
return assertIsID(id);
}
break;
String viewQueryValue = Utils.getQueryValue(url, "v");
if (viewQueryValue != null) {
return assertIsID(viewQueryValue);
}
return assertIsID(path);
}
}

View File

@ -0,0 +1,31 @@
package org.schabi.newpipe.extractor.stream;
import java.io.Serializable;
public class Description implements Serializable {
public static final int HTML = 1;
public static final int MARKDOWN = 2;
public static final int PLAIN_TEXT = 3;
public static final Description emptyDescription = new Description("", PLAIN_TEXT);
private String content;
private int type;
public Description(String content, int type) {
this.type = type;
if (content == null) {
this.content = "";
} else {
this.content = content;
}
}
public String getContent() {
return content;
}
public int getType() {
return type;
}
}
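A minimal usage sketch of the new Description container (the content string and the rendering decision are illustrative, not taken from the extractor):
Description description = new Description("Some <b>bold</b> description", Description.HTML);
if (description.getType() == Description.HTML) {
    // render the content as HTML; MARKDOWN and PLAIN_TEXT would be handled differently
}
String content = description.getContent(); // never null, an empty string when there is no description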

View File

@ -1,66 +1,64 @@
package org.schabi.newpipe.extractor.stream;
import javax.annotation.Nullable;
import java.util.Collection;
import java.util.List;
public final class Frameset {
private List<String> urls;
private int frameWidth;
private int frameHeight;
private int totalCount;
private int framesPerPageX;
private int framesPerPageY;
private List<String> urls;
private int frameWidth;
private int frameHeight;
private int totalCount;
private int framesPerPageX;
private int framesPerPageY;
public Frameset(List<String> urls, int frameWidth, int frameHeight, int totalCount, int framesPerPageX, int framesPerPageY) {
this.urls = urls;
this.totalCount = totalCount;
this.frameWidth = frameWidth;
this.frameHeight = frameHeight;
this.framesPerPageX = framesPerPageX;
this.framesPerPageY = framesPerPageY;
}
public Frameset(List<String> urls, int frameWidth, int frameHeight, int totalCount, int framesPerPageX, int framesPerPageY) {
this.urls = urls;
this.totalCount = totalCount;
this.frameWidth = frameWidth;
this.frameHeight = frameHeight;
this.framesPerPageX = framesPerPageX;
this.framesPerPageY = framesPerPageY;
}
/**
* @return list of urls to images with frames
*/
public List<String> getUrls() {
return urls;
}
/**
* @return list of urls to images with frames
*/
public List<String> getUrls() {
return urls;
}
/**
* @return total count of frames
*/
public int getTotalCount() {
return totalCount;
}
/**
* @return total count of frames
*/
public int getTotalCount() {
return totalCount;
}
/**
* @return maximum frames count by x
*/
public int getFramesPerPageX() {
return framesPerPageX;
}
/**
* @return maximum frames count by x
*/
public int getFramesPerPageX() {
return framesPerPageX;
}
/**
* @return maximum frames count by y
*/
public int getFramesPerPageY() {
return framesPerPageY;
}
/**
* @return maximum frames count by y
*/
public int getFramesPerPageY() {
return framesPerPageY;
}
/**
* @return width of a one frame, in pixels
*/
public int getFrameWidth() {
return frameWidth;
}
/**
* @return width of a one frame, in pixels
*/
public int getFrameWidth() {
return frameWidth;
}
/**
* @return height of a one frame, in pixels
*/
public int getFrameHeight() {
return frameHeight;
}
/**
* @return height of a one frame, in pixels
*/
public int getFrameHeight() {
return frameHeight;
}
}
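For illustration, a hedged sketch of how a caller might locate one frame inside the storyboard mosaics using only the getters above; frameset and frameIndex are assumed to exist, and frames are assumed to be laid out row by row:
int framesPerPage = frameset.getFramesPerPageX() * frameset.getFramesPerPageY();
String pageUrl = frameset.getUrls().get(frameIndex / framesPerPage); // mosaic image containing the frame
int indexOnPage = frameIndex % framesPerPage;
int left = (indexOnPage % frameset.getFramesPerPageX()) * frameset.getFrameWidth();  // x offset in pixels
int top = (indexOnPage / frameset.getFramesPerPageX()) * frameset.getFrameHeight();  // y offset in pixels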

View File

@ -1,10 +1,10 @@
package org.schabi.newpipe.extractor.stream;
import org.schabi.newpipe.extractor.MediaFormat;
import java.io.Serializable;
import java.util.List;
import org.schabi.newpipe.extractor.MediaFormat;
/**
* Creates a stream object from url, format and optional torrent url
*/
@ -22,19 +22,19 @@ public abstract class Stream implements Serializable {
/**
* Instantiates a new stream object.
*
* @param url the url
* @param url the url
* @param format the format
*/
public Stream(String url, MediaFormat format) {
this(url, null, format);
}
/**
* Instantiates a new stream object.
*
* @param url the url
* @param url the url
* @param torrentUrl the url to torrent file, example https://webtorrent.io/torrents/big-buck-bunny.torrent
* @param format the format
* @param format the format
*/
public Stream(String url, String torrentUrl, MediaFormat format) {
this.url = url;
@ -76,7 +76,7 @@ public abstract class Stream implements Serializable {
public String getUrl() {
return url;
}
/**
* Gets the torrent url.
*

View File

@ -34,6 +34,7 @@ import javax.annotation.Nullable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
/**
* Scrapes information from a video/audio streaming service (eg, YouTube).
@ -75,6 +76,7 @@ public abstract class StreamExtractor extends Extractor {
/**
* This will return the url to the thumbnail of the stream. Try to return the medium resolution here.
*
* @return The url of the thumbnail.
* @throws ParsingException
*/
@ -82,15 +84,17 @@ public abstract class StreamExtractor extends Extractor {
public abstract String getThumbnailUrl() throws ParsingException;
/**
* This is the stream description. On YouTube this is the video description. You can return simple HTML here.
* @return The description of the stream/video.
* This is the stream description.
*
* @return The description of the stream/video or Description.emptyDescription if the description is empty.
* @throws ParsingException
*/
@Nonnull
public abstract String getDescription() throws ParsingException;
public abstract Description getDescription() throws ParsingException;
/**
* Get the age limit.
*
* @return The age which limits the content or {@value NO_AGE_LIMIT} if there is no limit
* @throws ParsingException if an error occurs while parsing
*/
@ -98,6 +102,7 @@ public abstract class StreamExtractor extends Extractor {
/**
* This should return the length of a video in seconds.
*
* @return The length of the stream in seconds.
* @throws ParsingException
*/
@ -107,6 +112,7 @@ public abstract class StreamExtractor extends Extractor {
* If the url you are currently handling contains a time stamp/seek, you can return the
* position it represents here.
* If the url has no time stamp simply return zero.
*
* @return the timestamp in seconds
* @throws ParsingException
*/
@ -115,22 +121,25 @@ public abstract class StreamExtractor extends Extractor {
/**
* The count of how many people have watched the video/listened to the audio stream.
* If the current stream has no view count or it is not available, simply return -1
*
* @return amount of views.
* @throws ParsingException
*/
public abstract long getViewCount() throws ParsingException;
/**
* The Amount of likes a video/audio stream got.
* The amount of likes a video/audio stream got.
* If the current stream has no likes or it is not available, simply return -1
*
* @return the amount of likes the stream got
* @throws ParsingException
*/
public abstract long getLikeCount() throws ParsingException;
/**
* The Amount of dislikes a video/audio stream got.
* The amount of dislikes a video/audio stream got.
* If the current stream has no dislikes or it's not available simply return -1
*
* @return the amount of dislikes the stream got
* @throws ParsingException
*/
@ -142,6 +151,7 @@ public abstract class StreamExtractor extends Extractor {
* <a href="https://teamnewpipe.github.io/documentation/03_Implement_a_service/#channel">ChannelExtractor</a>,
* so be sure to implement that one before you return a value here, otherwise NewPipe will crash if one selects
* this url.
*
* @return the url to the page of the creator/uploader of the stream or an empty String
* @throws ParsingException
*/
@ -151,6 +161,7 @@ public abstract class StreamExtractor extends Extractor {
/**
* The name of the creator/uploader of the stream.
* If the name is not available you can simply return an empty string.
*
* @return the name of the creator/uploader of the stream or an empty String
* @throws ParsingException
*/
@ -160,6 +171,7 @@ public abstract class StreamExtractor extends Extractor {
/**
* The url to the image file/profile picture/avatar of the creator/uploader of the stream.
* If the url is not available you can return an empty String.
*
* @return The url of the image file of the uploader or an empty String
* @throws ParsingException
*/
@ -169,20 +181,24 @@ public abstract class StreamExtractor extends Extractor {
/**
* Get the dash mpd url. If you don't know what a dash MPD is you can read about it
* <a href="https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html">here</a>.
*
* @return the url as a string or an empty string
* @throws ParsingException if an error occurs while reading
*/
@Nonnull public abstract String getDashMpdUrl() throws ParsingException;
@Nonnull
public abstract String getDashMpdUrl() throws ParsingException;
/**
* I am not sure if this is in use, or how it is used. However, the frontend is missing support
* for HLS streams. Prove me wrong if you can. Please open an
* <a href="https://github.com/teamnewpipe/newpipe/issues">issue</a>,
* or fix this description if you know what's up with this.
*
* @return The Url to the hls stream.
* @throws ParsingException
*/
@Nonnull public abstract String getHlsUrl() throws ParsingException;
@Nonnull
public abstract String getHlsUrl() throws ParsingException;
/**
* This should return a list of available
@ -190,6 +206,7 @@ public abstract class StreamExtractor extends Extractor {
* You can also return null or an empty list; however, be aware that if you don't return anything
* in getVideoStreams(), getVideoOnlyStreams() and getDashMpdUrl() either, the Collector will handle this as
* a failed extraction procedure.
*
* @return a list of audio only streams in the format of AudioStream
* @throws IOException
* @throws ExtractionException
@ -203,6 +220,7 @@ public abstract class StreamExtractor extends Extractor {
* You can also return null or an empty list; however, be aware that if you don't return anything
* in getAudioStreams(), getVideoOnlyStreams() and getDashMpdUrl() either, the Collector will handle this as
* a failed extraction procedure.
*
* @return a list of combined video and audio streams in the format of VideoStream
* @throws IOException
* @throws ExtractionException
@ -216,6 +234,7 @@ public abstract class StreamExtractor extends Extractor {
* You can also return null or an empty list; however, be aware that if you don't return anything
* in getAudioStreams(), getVideoStreams() and getDashMpdUrl() either, the Collector will handle this as
* a failed extraction procedure.
*
* @return a list of video only streams in the format of VideoStream
* @throws IOException
* @throws ExtractionException
@ -226,6 +245,7 @@ public abstract class StreamExtractor extends Extractor {
* This will return a list of available
* <a href="https://teamnewpipe.github.io/NewPipeExtractor/javadoc/org/schabi/newpipe/extractor/stream/Subtitles.html">Subtitles</a>s.
* If no subtitles are available, an empty list can be returned.
*
* @return a list of available subtitles or an empty list
* @throws IOException
* @throws ExtractionException
@ -238,6 +258,7 @@ public abstract class StreamExtractor extends Extractor {
* <a href="https://teamnewpipe.github.io/NewPipeExtractor/javadoc/org/schabi/newpipe/extractor/stream/Subtitles.html">Subtitles</a>s
* given by a specific type.
* If no subtitles in that specific format are available, an empty list can be returned.
*
* @param format the media format by which the subtitles should be filtered
* @return a list of available subtitles or an empty list
* @throws IOException
@ -248,15 +269,17 @@ public abstract class StreamExtractor extends Extractor {
/**
* Get the <a href="https://teamnewpipe.github.io/NewPipeExtractor/javadoc/">StreamType</a>.
*
* @return the type of the stream
* @throws ParsingException
*/
public abstract StreamType getStreamType() throws ParsingException;
/**
* should return the url of the next stream. NewPipe will automatically play
* Should return the url of the next stream. NewPipe will automatically play
* the next stream if the user wants that.
* If the next stream is not available simply return null
*
* @return the InfoItem of the next stream
* @throws IOException
* @throws ExtractionException
@ -268,7 +291,8 @@ public abstract class StreamExtractor extends Extractor {
* streams. If you don't like suggested streams you should implement them anyway since they can
* be disabled by the user later in the frontend.
* This list MUST NOT contain the next available video as this should be returned through getNextStream()
* If is is not available simply return null
* If it is not available simply return null
*
* @return a list of InfoItems showing the related videos/streams
* @throws IOException
* @throws ExtractionException
@ -277,6 +301,7 @@ public abstract class StreamExtractor extends Extractor {
/**
* Should return a list of Frameset objects that contain previews of the stream's frames.
*
* @return a list of preview frames, or an empty list if frame previews are not supported or not found for the specified stream
* @throws IOException
* @throws ExtractionException
@ -299,9 +324,10 @@ public abstract class StreamExtractor extends Extractor {
/**
* Override this function if the format of the time stamp in the url is not the same as YouTube's.
* Honestly I don't even know the time stamp fromat of youtube.
* Honestly I don't even know the time stamp format of YouTube.
*
* @param regexPattern the regex pattern used to extract the time stamp from the url
* @return the sime stamp/seek for the video in seconds
* @return the time stamp/seek for the video in seconds
* @throws ParsingException
*/
protected long getTimestampSeconds(String regexPattern) throws ParsingException {
@ -309,10 +335,10 @@ public abstract class StreamExtractor extends Extractor {
try {
timeStamp = Parser.matchGroup1(regexPattern, getOriginalUrl());
} catch (Parser.RegexException e) {
// catch this instantly since an url does not necessarily have to have a time stamp
// catch this instantly since a url does not necessarily have a timestamp
// -2 because well the testing system will then know its the regex that failed :/
// not good i know
// -2 because the testing system will consequently know that the regex failed
// not good, I know
return -2;
}
@ -349,4 +375,81 @@ public abstract class StreamExtractor extends Extractor {
return 0;
}
}
/**
* The host of the stream (e.g. peertube.cpy.re).
* If the host is not available, or if the service uses
* a centralised rather than a federated system,
* you can simply return an empty string.
*
* @return the host of the stream or an empty String.
* @throws ParsingException
*/
@Nonnull
public abstract String getHost() throws ParsingException;
/**
* The privacy of the stream (e.g. Public, Private, Unlisted).
* If the privacy is not available you can simply return an empty string.
*
* @return the privacy of the stream or an empty String.
* @throws ParsingException
*/
@Nonnull
public abstract String getPrivacy() throws ParsingException;
/**
* The name of the category of the stream.
* If the category is not available you can simply return an empty string.
*
* @return the category of the stream or an empty String.
* @throws ParsingException
*/
@Nonnull
public abstract String getCategory() throws ParsingException;
/**
* The name of the licence of the stream.
* If the licence is not available you can simply return an empty string.
*
* @return the licence of the stream or an empty String.
* @throws ParsingException
*/
@Nonnull
public abstract String getLicence() throws ParsingException;
/**
* The locale language of the stream.
* If the language is not available you can simply return null.
* If the language is provided by a language code, you can return
* new Locale(language_code);
*
* @return the locale language of the stream or null.
* @throws ParsingException
*/
@Nullable
public abstract Locale getLanguageInfo() throws ParsingException;
/**
* The list of tags of the stream.
* If the tag list is not available you can simply return an empty list.
*
* @return the list of tags of the stream or an empty list.
* @throws ParsingException
*/
@Nonnull
public abstract List<String> getTags() throws ParsingException;
/**
* The support information of the stream.
* see: https://framatube.org/videos/watch/ee408ec8-07cd-4e35-b884-fb681a4b9d37
* (support button).
* If the support information is not available,
* you can simply return an empty String.
*
* @return the support information of the stream or an empty String.
* @throws ParsingException
*/
@Nonnull
public abstract String getSupportInfo() throws ParsingException;
}
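To make the newly added metadata getters above concrete, here is a small caller-side sketch. It is illustrative only and not part of this diff: the PeerTube watch url is a placeholder, and it reuses the test downloader shown further down in this diff to initialise NewPipe.

import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.ServiceList;
import org.schabi.newpipe.extractor.stream.StreamExtractor;

import java.util.List;
import java.util.Locale;

public class StreamMetadataSketch {
    public static void main(String[] args) throws Exception {
        // Mirrors the test setup; a real app would supply its own Downloader implementation.
        NewPipe.init(DownloaderTestImpl.getInstance());

        // Placeholder url for a federated (PeerTube) stream.
        StreamExtractor extractor = ServiceList.PeerTube
                .getStreamExtractor("https://peertube.cpy.re/videos/watch/some-placeholder-id");
        extractor.fetchPage();

        String host = extractor.getHost();             // e.g. "peertube.cpy.re", or "" for centralised services
        String privacy = extractor.getPrivacy();       // e.g. "Public"
        String category = extractor.getCategory();
        String licence = extractor.getLicence();
        Locale language = extractor.getLanguageInfo(); // may be null
        List<String> tags = extractor.getTags();
        String support = extractor.getSupportInfo();

        System.out.println(host + " / " + privacy + " / " + category + " / " + licence
                + " / " + language + " / " + tags + " / " + support);
    }
}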

View File

@ -13,6 +13,7 @@ import org.schabi.newpipe.extractor.utils.ExtractorHelper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
/*
* Created by Christian Schabesberger on 26.08.15.
@ -270,6 +271,43 @@ public class StreamInfo extends Info {
streamInfo.addError(e);
}
//additional info
try {
streamInfo.setHost(extractor.getHost());
} catch (Exception e) {
streamInfo.addError(e);
}
try {
streamInfo.setPrivacy(extractor.getPrivacy());
} catch (Exception e) {
streamInfo.addError(e);
}
try {
streamInfo.setCategory(extractor.getCategory());
} catch (Exception e) {
streamInfo.addError(e);
}
try {
streamInfo.setLicence(extractor.getLicence());
} catch (Exception e) {
streamInfo.addError(e);
}
try {
streamInfo.setLanguageInfo(extractor.getLanguageInfo());
} catch (Exception e) {
streamInfo.addError(e);
}
try {
streamInfo.setTags(extractor.getTags());
} catch (Exception e) {
streamInfo.addError(e);
}
try {
streamInfo.setSupportInfo(extractor.getSupportInfo());
} catch (Exception e) {
streamInfo.addError(e);
}
streamInfo.setRelatedStreams(ExtractorHelper.getRelatedVideosOrLogError(streamInfo, extractor));
return streamInfo;
@ -281,7 +319,7 @@ public class StreamInfo extends Info {
private DateWrapper uploadDate;
private long duration = -1;
private int ageLimit = -1;
private String description;
private Description description;
private long viewCount = -1;
private long likeCount = -1;
@ -308,6 +346,14 @@ public class StreamInfo extends Info {
private long startPosition = 0;
private List<SubtitlesStream> subtitles = new ArrayList<>();
private String host = "";
private String privacy = "";
private String category = "";
private String licence = "";
private String support = "";
private Locale language = null;
private List<String> tags = new ArrayList<>();
/**
* Get the stream type
*
@ -371,11 +417,11 @@ public class StreamInfo extends Info {
this.ageLimit = ageLimit;
}
public String getDescription() {
public Description getDescription() {
return description;
}
public void setDescription(String description) {
public void setDescription(Description description) {
this.description = description;
}
@ -533,4 +579,59 @@ public class StreamInfo extends Info {
this.subtitles = subtitles;
}
public String getHost() {
return this.host;
}
public void setHost(String str) {
this.host = str;
}
public String getPrivacy() {
return this.privacy;
}
public void setPrivacy(String str) {
this.privacy = str;
}
public String getCategory() {
return this.category;
}
public void setCategory(String cat) {
this.category = cat;
}
public String getLicence() {
return this.licence;
}
public void setLicence(String str) {
this.licence = str;
}
public Locale getLanguageInfo() {
return this.language;
}
public void setLanguageInfo(Locale lang) {
this.language = lang;
}
public List<String> getTags() {
return this.tags;
}
public void setTags(List<String> tags) {
this.tags = tags;
}
public void setSupportInfo(String support) {
this.support = support;
}
public String getSupportInfo() {
return this.support;
}
}
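A hedged usage sketch for the new StreamInfo fields and the Description-typed getter follows. It assumes the existing static StreamInfo.getInfo(String) entry point and that Description lives in the same stream package as StreamInfo; the url is a placeholder.

import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.stream.Description;
import org.schabi.newpipe.extractor.stream.StreamInfo;

import java.util.Locale;

public class StreamInfoSketch {
    public static void main(String[] args) throws Exception {
        NewPipe.init(DownloaderTestImpl.getInstance());

        // Placeholder url; any supported service works here.
        StreamInfo info = StreamInfo.getInfo("https://peertube.cpy.re/videos/watch/some-placeholder-id");

        Description description = info.getDescription(); // now a Description object, no longer a plain String
        String host = info.getHost();                     // "" unless the service is federated
        String category = info.getCategory();
        Locale language = info.getLanguageInfo();         // may be null

        System.out.println(description + " / " + host + " / " + category + " / " + language);
    }
}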

View File

@ -31,6 +31,7 @@ public interface StreamInfoItemExtractor extends InfoItemExtractor {
/**
* Get the stream type
*
* @return the stream type
* @throws ParsingException thrown if there is an error in the extraction
*/
@ -38,6 +39,7 @@ public interface StreamInfoItemExtractor extends InfoItemExtractor {
/**
* Check if the stream is an ad.
*
* @return {@code true} if the stream is an ad.
* @throws ParsingException thrown if there is an error in the extraction
*/
@ -45,6 +47,7 @@ public interface StreamInfoItemExtractor extends InfoItemExtractor {
/**
* Get the stream duration in seconds
*
* @return the stream duration in seconds
* @throws ParsingException thrown if there is an error in the extraction
*/
@ -52,6 +55,7 @@ public interface StreamInfoItemExtractor extends InfoItemExtractor {
/**
* Parses the number of views
*
* @return the number of views or -1 for live streams
* @throws ParsingException thrown if there is an error in the extraction
*/
@ -59,6 +63,7 @@ public interface StreamInfoItemExtractor extends InfoItemExtractor {
/**
* Get the uploader name
*
* @return the uploader name
* @throws ParsingException if parsing fails
*/
@ -80,9 +85,9 @@ public interface StreamInfoItemExtractor extends InfoItemExtractor {
/**
* Extracts the upload date and time of this item and parses it.
* <p>
* If the service doesn't provide an exact time, an approximation can be returned.
* <br>
* If the service doesn't provide any date at all, then {@code null} should be returned.
* If the service doesn't provide an exact time, an approximation can be returned.
* <br>
* If the service doesn't provide any date at all, then {@code null} should be returned.
* </p>
*
* @return The date and time (can be approximated) this item was uploaded or {@code null}.


View File

@ -101,8 +101,8 @@ public class StreamInfoItemsCollector extends InfoItemsCollector<StreamInfoItem,
public List<StreamInfoItem> getStreamInfoItemList() {
List<StreamInfoItem> siiList = new Vector<>();
for(InfoItem ii : super.getItems()) {
if(ii instanceof StreamInfoItem) {
for (InfoItem ii : super.getItems()) {
if (ii instanceof StreamInfoItem) {
siiList.add((StreamInfoItem) ii);
}
}

View File

@ -52,7 +52,7 @@ public class SubtitlesStream extends Stream implements Serializable {
@Override
public boolean equalStats(Stream cmp) {
return super.equalStats(cmp)&&
return super.equalStats(cmp) &&
cmp instanceof SubtitlesStream &&
code.equals(((SubtitlesStream) cmp).code) &&
autoGenerated == ((SubtitlesStream) cmp).autoGenerated;
@ -67,7 +67,7 @@ public class SubtitlesStream extends Stream implements Serializable {
}
public Locale getLocale() {
return locale;
return locale;
}
}

View File

@ -36,7 +36,7 @@ public class VideoStream extends Stream {
this.resolution = resolution;
this.isVideoOnly = isVideoOnly;
}
public VideoStream(String url, String torrentUrl, MediaFormat format, String resolution) {
this(url, torrentUrl, format, resolution, false);
}
@ -56,6 +56,7 @@ public class VideoStream extends Stream {
/**
* Get the video resolution
*
* @return the video resolution
*/
public String getResolution() {
@ -64,8 +65,9 @@ public class VideoStream extends Stream {
/**
* Check if the video is video only.
*
* <p>
* Video only streams have no audio
*
* @return {@code true} if this stream is vid
*/
public boolean isVideoOnly() {

View File

@ -1,8 +1,8 @@
package org.schabi.newpipe.extractor.utils;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.MediaFormat;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.services.youtube.ItagItem;
@ -109,11 +109,11 @@ public class DashMpdParser {
* <p>
* It has video, video only and audio streams and will only add to the list if it doesn't
* find a similar stream in the respective lists (calling {@link Stream#equalStats}).
*
* <p>
* Info about dash MPD can be found here
* @see <a href="https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html">www.brendanlong.com</a>
*
* @param streamInfo where the parsed streams will be added
* @see <a href="https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html">www.brendanlong.com</a>
*/
public static ParserResult getStreams(final StreamInfo streamInfo)
throws DashMpdParsingException, ReCaptchaException {
@ -160,7 +160,7 @@ public class DashMpdParser {
final MediaFormat mediaFormat = MediaFormat.getFromMimeType(mimeType);
if (itag.itagType.equals(ItagItem.ItagType.AUDIO)) {
if(segmentationList == null) {
if (segmentationList == null) {
final AudioStream audioStream = new AudioStream(url, mediaFormat, itag.avgBitrate);
if (!Stream.containSimilarStream(audioStream, streamInfo.getAudioStreams())) {
audioStreams.add(audioStream);
@ -172,7 +172,7 @@ public class DashMpdParser {
} else {
boolean isVideoOnly = itag.itagType.equals(ItagItem.ItagType.VIDEO_ONLY);
if(segmentationList == null) {
if (segmentationList == null) {
final VideoStream videoStream = new VideoStream(url,
mediaFormat,
itag.resolutionString,
@ -191,7 +191,7 @@ public class DashMpdParser {
itag.resolutionString,
isVideoOnly);
if(isVideoOnly) {
if (isVideoOnly) {
segmentedVideoOnlyStreams.add(videoStream);
} else {
segmentedVideoStreams.add(videoStream);

View File

@ -5,7 +5,6 @@ import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.ListExtractor.InfoItemsPage;
import org.schabi.newpipe.extractor.comments.CommentsInfo;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamInfo;
@ -31,7 +30,7 @@ public class ExtractorHelper {
public static List<InfoItem> getRelatedVideosOrLogError(StreamInfo info, StreamExtractor extractor) {
try {
InfoItemsCollector<? extends InfoItem, ?> collector = extractor.getRelatedStreams();
if(collector == null) return Collections.emptyList();
if (collector == null) return Collections.emptyList();
info.addAllErrors(collector.getErrors());
//noinspection unchecked
@ -41,5 +40,5 @@ public class ExtractorHelper {
return Collections.emptyList();
}
}
}

View File

@ -1,76 +1,85 @@
package org.schabi.newpipe.extractor.utils;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
public class JsonUtils {
public static final JsonObject DEFAULT_EMPTY = new JsonObject();
private JsonUtils() {
}
@Nonnull
public static Object getValue(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException{
public static Object getValue(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException {
List<String> keys = Arrays.asList(path.split("\\."));
object = getObject(object, keys.subList(0, keys.size() - 1));
if (null == object) throw new ParsingException("Unable to get " + path);
Object result = object.get(keys.get(keys.size() - 1));
if(null == result) throw new ParsingException("Unable to get " + path);
if (null == result) throw new ParsingException("Unable to get " + path);
return result;
}
@Nonnull
public static String getString(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException{
public static String getString(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException {
Object value = getValue(object, path);
if(value instanceof String) {
if (value instanceof String) {
return (String) value;
}else {
} else {
throw new ParsingException("Unable to get " + path);
}
}
@Nonnull
public static Number getNumber(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException{
public static Boolean getBoolean(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException {
Object value = getValue(object, path);
if(value instanceof Number) {
if (value instanceof Boolean) {
return (Boolean) value;
} else {
throw new ParsingException("Unable to get " + path);
}
}
@Nonnull
public static Number getNumber(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException {
Object value = getValue(object, path);
if (value instanceof Number) {
return (Number) value;
}else {
} else {
throw new ParsingException("Unable to get " + path);
}
}
@Nonnull
public static JsonObject getObject(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException{
public static JsonObject getObject(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException {
Object value = getValue(object, path);
if(value instanceof JsonObject) {
if (value instanceof JsonObject) {
return (JsonObject) value;
}else {
} else {
throw new ParsingException("Unable to get " + path);
}
}
@Nonnull
public static JsonArray getArray(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException{
public static JsonArray getArray(@Nonnull JsonObject object, @Nonnull String path) throws ParsingException {
Object value = getValue(object, path);
if(value instanceof JsonArray) {
if (value instanceof JsonArray) {
return (JsonArray) value;
}else {
} else {
throw new ParsingException("Unable to get " + path);
}
}
@Nonnull
public static List<Object> getValues(@Nonnull JsonArray array, @Nonnull String path) throws ParsingException {
List<Object> result = new ArrayList<>();
for (int i = 0; i < array.size(); i++) {
JsonObject obj = array.getObject(i);
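A minimal sketch of the JsonUtils helpers shown above, using nanojson's JsonParser (already a dependency of this project); the JSON document and the dotted paths are made up for illustration.

import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import org.schabi.newpipe.extractor.utils.JsonUtils;

public class JsonUtilsSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical API response.
        JsonObject video = JsonParser.object()
                .from("{\"snippet\": {\"title\": \"Example\", \"views\": 42, \"nsfw\": false}}");

        String title = JsonUtils.getString(video, "snippet.title");  // "Example"
        Number views = JsonUtils.getNumber(video, "snippet.views");  // 42
        Boolean nsfw = JsonUtils.getBoolean(video, "snippet.nsfw");  // false (new helper in this diff)

        System.out.println(title + " / " + views + " / " + nsfw);
    }
}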

View File

@ -1,5 +1,10 @@
package org.schabi.newpipe.extractor.utils;
import org.nibor.autolink.LinkExtractor;
import org.nibor.autolink.LinkSpan;
import org.nibor.autolink.LinkType;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.ArrayList;
@ -9,11 +14,6 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.nibor.autolink.LinkExtractor;
import org.nibor.autolink.LinkSpan;
import org.nibor.autolink.LinkType;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
/*
* Created by Christian Schabesberger on 02.02.16.
*
@ -51,7 +51,7 @@ public class Parser {
public static String matchGroup1(String pattern, String input) throws RegexException {
return matchGroup(pattern, input, 1);
}
public static String matchGroup1(Pattern pattern, String input) throws RegexException {
return matchGroup(pattern, input, 1);
}
@ -60,7 +60,7 @@ public class Parser {
Pattern pat = Pattern.compile(pattern);
return matchGroup(pat, input, group);
}
public static String matchGroup(Pattern pat, String input, int group) throws RegexException {
Matcher mat = pat.matcher(input);
boolean foundMatch = mat.find();
@ -102,7 +102,7 @@ public class Parser {
.linkTypes(EnumSet.of(LinkType.URL, LinkType.WWW))
.build();
Iterable<LinkSpan> linkss = linkExtractor.extractLinks(txt);
for(LinkSpan ls : linkss) {
for (LinkSpan ls : linkss) {
links.add(txt.substring(ls.getBeginIndex(), ls.getEndIndex()));
}
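A quick sketch of Parser.matchGroup1, the helper used by getTimestampSeconds earlier in this diff; the pattern and url are illustrative only.

import org.schabi.newpipe.extractor.utils.Parser;

public class ParserSketch {
    public static void main(String[] args) throws Parser.RegexException {
        // Pull the seek parameter out of a watch url; the first capture group is returned.
        String seconds = Parser.matchGroup1("t=(\\d+)", "https://example.org/watch?v=abc123&t=90");
        System.out.println(seconds); // 90
    }
}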

View File

@ -1,15 +1,18 @@
package org.schabi.newpipe.extractor.utils;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.List;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
public class Utils {
public static final String HTTP = "http://";
public static final String HTTPS = "https://";
private Utils() {
//no instance
}
@ -35,6 +38,7 @@ public class Utils {
* <li>1.23K -&gt; 1230</li>
* <li>1.23M -&gt; 1230000</li>
* </ul>
*
* @param numberWord string to be converted to a long
* @return a long
* @throws NumberFormatException
@ -82,9 +86,6 @@ public class Utils {
}
}
private static final String HTTP = "http://";
private static final String HTTPS = "https://";
public static String replaceHttpWithHttps(final String url) {
if (url == null) return null;
@ -165,17 +166,17 @@ public class Utils {
return setsNoPort || usesDefaultPort;
}
public static String removeUTF8BOM(String s) {
if (s.startsWith("\uFEFF")) {
s = s.substring(1);
}
if (s.endsWith("\uFEFF")) {
s = s.substring(0, s.length()-1);
s = s.substring(0, s.length() - 1);
}
return s;
}
public static String getBaseUrl(String url) throws ParsingException {
URL uri;
try {
@ -185,5 +186,4 @@ public class Utils {
}
return uri.getProtocol() + "://" + uri.getAuthority();
}
}
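A short sketch of the Utils string helpers touched above; the inputs are illustrative.

import org.schabi.newpipe.extractor.utils.Utils;

public class UtilsSketch {
    public static void main(String[] args) throws Exception {
        String secure = Utils.replaceHttpWithHttps("http://example.org/video?id=1"); // "https://example.org/video?id=1"
        String base = Utils.getBaseUrl("https://example.org/video?id=1");            // "https://example.org"
        String trimmed = Utils.removeUTF8BOM("\uFEFFhello\uFEFF");                   // "hello"

        System.out.println(secure + " / " + base + " / " + trimmed);
    }
}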

View File

@ -20,7 +20,7 @@ import java.util.Map;
public class DownloaderTestImpl extends Downloader {
private static final String USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0";
private static final String USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101 Firefox/68.0";
private static final String DEFAULT_HTTP_ACCEPT_LANGUAGE = "en";
private static DownloaderTestImpl instance = null;
@ -99,19 +99,25 @@ public class DownloaderTestImpl extends Downloader {
final int responseCode = connection.getResponseCode();
final String responseMessage = connection.getResponseMessage();
final Map<String, List<String>> responseHeaders = connection.getHeaderFields();
final String latestUrl = connection.getURL().toString();
return new Response(responseCode, responseMessage, responseHeaders, response.toString());
return new Response(responseCode, responseMessage, responseHeaders, response.toString(), latestUrl);
} catch (Exception e) {
final int responseCode = connection.getResponseCode();
/*
* HTTP 429 == Too Many Requests
* Received from YouTube.com == reCAPTCHA challenge request
* See: https://github.com/rg3/youtube-dl/issues/5138
*/
if (connection.getResponseCode() == 429) {
if (responseCode == 429) {
throw new ReCaptchaException("reCaptcha Challenge requested", url);
} else if (responseCode != -1) {
final String latestUrl = connection.getURL().toString();
return new Response(responseCode, connection.getResponseMessage(), connection.getHeaderFields(), null, latestUrl);
}
throw new IOException(connection.getResponseCode() + " " + connection.getResponseMessage(), e);
throw new IOException("Error occurred while fetching the content", e);
} finally {
if (outputStream != null) outputStream.close();
if (input != null) input.close();

View File

@ -2,20 +2,29 @@ package org.schabi.newpipe.extractor.services;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.ListExtractor;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelInfoItem;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.LinkHandlerFactory;
import org.schabi.newpipe.extractor.linkhandler.ListLinkHandlerFactory;
import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.playlist.PlaylistInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import java.util.Calendar;
import java.util.List;
import static junit.framework.TestCase.assertFalse;
import static org.junit.Assert.*;
import static org.schabi.newpipe.extractor.ExtractorAsserts.*;
import static org.schabi.newpipe.extractor.StreamingService.*;
public final class DefaultTests {
public static void defaultTestListOfItems(int expectedServiceId, List<? extends InfoItem> itemsList, List<Throwable> errors) {
assertTrue("List of items is empty", !itemsList.isEmpty());
public static void defaultTestListOfItems(StreamingService expectedService, List<? extends InfoItem> itemsList, List<Throwable> errors) throws ParsingException {
assertFalse("List of items is empty", itemsList.isEmpty());
assertFalse("List of items contains a null element", itemsList.contains(null));
assertEmptyErrors("Errors during stream list extraction", errors);
assertEmptyErrors("Errors during extraction", errors);
for (InfoItem item : itemsList) {
assertIsSecureUrl(item.getUrl());
@ -23,12 +32,17 @@ public final class DefaultTests {
assertIsSecureUrl(item.getThumbnailUrl());
}
assertNotNull("InfoItem type not set: " + item, item.getInfoType());
assertEquals("Service id doesn't match: " + item, expectedServiceId, item.getServiceId());
assertEquals("Unexpected item service id", expectedService.getServiceId(), item.getServiceId());
assertNotEmpty("Item name not set: " + item, item.getName());
if (item instanceof StreamInfoItem) {
StreamInfoItem streamInfoItem = (StreamInfoItem) item;
assertNotEmpty("Uploader name not set: " + item, streamInfoItem.getUploaderName());
assertNotEmpty("Uploader url not set: " + item, streamInfoItem.getUploaderUrl());
assertIsSecureUrl(streamInfoItem.getUploaderUrl());
assertExpectedLinkType(expectedService, streamInfoItem.getUrl(), LinkType.STREAM);
assertExpectedLinkType(expectedService, streamInfoItem.getUploaderUrl(), LinkType.CHANNEL);
final String textualUploadDate = streamInfoItem.getTextualUploadDate();
if (textualUploadDate != null && !textualUploadDate.isEmpty()) {
@ -37,34 +51,56 @@ public final class DefaultTests {
assertTrue("Upload date not in the past", uploadDate.date().before(Calendar.getInstance()));
}
} else if (item instanceof ChannelInfoItem) {
final ChannelInfoItem channelInfoItem = (ChannelInfoItem) item;
assertExpectedLinkType(expectedService, channelInfoItem.getUrl(), LinkType.CHANNEL);
} else if (item instanceof PlaylistInfoItem) {
final PlaylistInfoItem playlistInfoItem = (PlaylistInfoItem) item;
assertExpectedLinkType(expectedService, playlistInfoItem.getUrl(), LinkType.PLAYLIST);
}
}
}
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestRelatedItems(ListExtractor<T> extractor, int expectedServiceId) throws Exception {
private static void assertExpectedLinkType(StreamingService expectedService, String url, LinkType expectedLinkType) throws ParsingException {
final LinkType linkTypeByUrl = expectedService.getLinkTypeByUrl(url);
assertNotEquals("Url is not recognized by its own service: \"" + url + "\"",
LinkType.NONE, linkTypeByUrl);
assertEquals("Service returned wrong link type for: \"" + url + "\"",
expectedLinkType, linkTypeByUrl);
}
public static <T extends InfoItem> void assertNoMoreItems(ListExtractor<T> extractor) throws Exception {
assertFalse("More items available when it shouldn't", extractor.hasNextPage());
final String nextPageUrl = extractor.getNextPageUrl();
assertTrue("Next page is not empty or null", nextPageUrl == null || nextPageUrl.isEmpty());
}
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestRelatedItems(ListExtractor<T> extractor) throws Exception {
final ListExtractor.InfoItemsPage<T> page = extractor.getInitialPage();
final List<T> itemsList = page.getItems();
List<Throwable> errors = page.getErrors();
defaultTestListOfItems(expectedServiceId, itemsList, errors);
defaultTestListOfItems(extractor.getService(), itemsList, errors);
return page;
}
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestMoreItems(ListExtractor<T> extractor, int expectedServiceId) throws Exception {
public static <T extends InfoItem> ListExtractor.InfoItemsPage<T> defaultTestMoreItems(ListExtractor<T> extractor) throws Exception {
assertTrue("Doesn't have more items", extractor.hasNextPage());
ListExtractor.InfoItemsPage<T> nextPage = extractor.getPage(extractor.getNextPageUrl());
final List<T> items = nextPage.getItems();
assertTrue("Next page is empty", !items.isEmpty());
assertFalse("Next page is empty", items.isEmpty());
assertEmptyErrors("Next page have errors", nextPage.getErrors());
defaultTestListOfItems(expectedServiceId, nextPage.getItems(), nextPage.getErrors());
defaultTestListOfItems(extractor.getService(), nextPage.getItems(), nextPage.getErrors());
return nextPage;
}
public static void defaultTestGetPageInNewExtractor(ListExtractor<? extends InfoItem> extractor, ListExtractor<? extends InfoItem> newExtractor, int expectedServiceId) throws Exception {
public static void defaultTestGetPageInNewExtractor(ListExtractor<? extends InfoItem> extractor, ListExtractor<? extends InfoItem> newExtractor) throws Exception {
final String nextPageUrl = extractor.getNextPageUrl();
final ListExtractor.InfoItemsPage<? extends InfoItem> page = newExtractor.getPage(nextPageUrl);
defaultTestListOfItems(expectedServiceId, page.getItems(), page.getErrors());
defaultTestListOfItems(extractor.getService(), page.getItems(), page.getErrors());
}
}
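A hedged sketch of how a service test could use the refactored helpers, which now take the extractor itself rather than a service id; the channel url is a placeholder and the class is hypothetical.

import org.junit.BeforeClass;
import org.junit.Test;
import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;

import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
import static org.schabi.newpipe.extractor.services.DefaultTests.defaultTestRelatedItems;

public class ExampleChannelExtractorTest {
    private static ChannelExtractor extractor;

    @BeforeClass
    public static void setUpClass() throws Exception {
        NewPipe.init(DownloaderTestImpl.getInstance());
        // Placeholder channel url.
        extractor = SoundCloud.getChannelExtractor("https://soundcloud.com/example-artist");
        extractor.fetchPage();
    }

    @Test
    public void testRelatedItems() throws Exception {
        // The helper now derives the expected service and link types from the extractor itself.
        defaultTestRelatedItems(extractor);
    }
}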

View File

@ -4,47 +4,86 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
import org.schabi.newpipe.extractor.services.media_ccc.extractors.MediaCCCConferenceExtractor;
import static junit.framework.TestCase.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.schabi.newpipe.extractor.ServiceList.MediaCCC;
/**
* Test {@link MediaCCCConferenceExtractor}
*/
public class MediaCCCConferenceExtractorTest {
private static ChannelExtractor extractor;
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = MediaCCC.getChannelExtractor("https://api.media.ccc.de/public/conferences/froscon2017");
extractor.fetchPage();
public static class FrOSCon2017 {
private static MediaCCCConferenceExtractor extractor;
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = (MediaCCCConferenceExtractor) MediaCCC.getChannelExtractor("https://media.ccc.de/c/froscon2017");
extractor.fetchPage();
}
@Test
public void testName() throws Exception {
assertEquals("FrOSCon 2017", extractor.getName());
}
@Test
public void testGetUrl() throws Exception {
assertEquals("https://api.media.ccc.de/public/conferences/froscon2017", extractor.getUrl());
}
@Test
public void testGetOriginalUrl() throws Exception {
assertEquals("https://media.ccc.de/c/froscon2017", extractor.getOriginalUrl());
}
@Test
public void testGetThumbnailUrl() throws Exception {
assertEquals("https://static.media.ccc.de/media/events/froscon/2017/logo.png", extractor.getAvatarUrl());
}
@Test
public void testGetInitalPage() throws Exception {
assertEquals(97, extractor.getInitialPage().getItems().size());
}
}
@Test
public void testName() throws Exception {
assertEquals("FrOSCon 2017", extractor.getName());
}
public static class Oscal2019 {
private static MediaCCCConferenceExtractor extractor;
@Test
public void testGetUrl() throws Exception {
assertEquals("https://api.media.ccc.de/public/conferences/froscon2017", extractor.getUrl());
}
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = (MediaCCCConferenceExtractor) MediaCCC.getChannelExtractor("https://media.ccc.de/c/oscal19");
extractor.fetchPage();
}
@Test
public void testGetOriginalUrl() throws Exception {
assertEquals("https://media.ccc.de/c/froscon2017", extractor.getOriginalUrl());
}
@Test
public void testName() throws Exception {
assertEquals("Open Source Conference Albania 2019", extractor.getName());
}
@Test
public void testGetThumbnailUrl() throws Exception {
assertEquals("https://static.media.ccc.de/media/events/froscon/2017/logo.png", extractor.getAvatarUrl());
}
@Test
public void testGetUrl() throws Exception {
assertEquals("https://api.media.ccc.de/public/conferences/oscal19", extractor.getUrl());
}
@Test
public void testGetInitalPage() throws Exception {
assertEquals(97,extractor.getInitialPage().getItems().size());
@Test
public void testGetOriginalUrl() throws Exception {
assertEquals("https://media.ccc.de/c/oscal19", extractor.getOriginalUrl());
}
@Test
public void testGetThumbnailUrl() throws Exception {
assertEquals("https://static.media.ccc.de/media/events/oscal/2019/oscal-19.png", extractor.getAvatarUrl());
}
@Test
public void testGetInitalPage() throws Exception {
assertTrue(extractor.getInitialPage().getItems().size() >= 21);
}
}
}

View File

@ -24,7 +24,7 @@ public class MediaCCCConferenceListExtractorTest {
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = MediaCCC.getKioskList().getDefaultKioskExtractor();
extractor = MediaCCC.getKioskList().getDefaultKioskExtractor();
extractor.fetchPage();
}
@ -49,8 +49,8 @@ public class MediaCCCConferenceListExtractorTest {
}
private boolean contains(List<InfoItem> itemList, String name) {
for(InfoItem item : itemList) {
if(item.getName().equals(name))
for (InfoItem item : itemList) {
if (item.getName().equals(name))
return true;
}
return false;

View File

@ -22,7 +22,7 @@ public class MediaCCCOggTest {
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = MediaCCC.getStreamExtractor("https://api.media.ccc.de/public/events/1317");
extractor = MediaCCC.getStreamExtractor("https://api.media.ccc.de/public/events/1317");
extractor.fetchPage();
}
@ -33,7 +33,7 @@ public class MediaCCCOggTest {
@Test
public void getAudioStreamsContainOgg() throws Exception {
for(AudioStream stream : extractor.getAudioStreams()) {
for (AudioStream stream : extractor.getAudioStreams()) {
assertEquals("OGG", stream.getFormat().toString());
}
}

View File

@ -28,7 +28,7 @@ public class MediaCCCSearchExtractorAllTest {
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = MediaCCC.getSearchExtractor( new MediaCCCSearchQueryHandlerFactory()
extractor = MediaCCC.getSearchExtractor(new MediaCCCSearchQueryHandlerFactory()
.fromQuery("c3", Arrays.asList(new String[0]), ""));
extractor.fetchPage();
itemsPage = extractor.getInitialPage();
@ -37,8 +37,8 @@ public class MediaCCCSearchExtractorAllTest {
@Test
public void testIfChannelInfoItemsAvailable() {
boolean isAvialable = false;
for(InfoItem item : itemsPage.getItems()) {
if(item instanceof ChannelInfoItem) {
for (InfoItem item : itemsPage.getItems()) {
if (item instanceof ChannelInfoItem) {
isAvialable = true;
}
}
@ -48,8 +48,8 @@ public class MediaCCCSearchExtractorAllTest {
@Test
public void testIfStreamInfoitemsAvailable() {
boolean isAvialable = false;
for(InfoItem item : itemsPage.getItems()) {
if(item instanceof StreamInfoItem) {
for (InfoItem item : itemsPage.getItems()) {
if (item instanceof StreamInfoItem) {
isAvialable = true;
}
}

View File

@ -27,7 +27,7 @@ public class MediaCCCSearchExtractorConferencesTest {
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = MediaCCC.getSearchExtractor( new MediaCCCSearchQueryHandlerFactory()
extractor = MediaCCC.getSearchExtractor(new MediaCCCSearchQueryHandlerFactory()
.fromQuery("c3", Arrays.asList(new String[]{"conferences"}), ""));
extractor.fetchPage();
itemsPage = extractor.getInitialPage();
@ -35,7 +35,7 @@ public class MediaCCCSearchExtractorConferencesTest {
@Test
public void testReturnTypeChannel() {
for(InfoItem item : itemsPage.getItems()) {
for (InfoItem item : itemsPage.getItems()) {
assertTrue("Item is not of type channel", item instanceof ChannelInfoItem);
}
}

View File

@ -28,7 +28,7 @@ public class MediaCCCSearchExtractorEventsTest {
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = MediaCCC.getSearchExtractor( new MediaCCCSearchQueryHandlerFactory()
extractor = MediaCCC.getSearchExtractor(new MediaCCCSearchQueryHandlerFactory()
.fromQuery("linux", Arrays.asList(new String[]{"events"}), ""));
extractor.fetchPage();
itemsPage = extractor.getInitialPage();
@ -65,7 +65,7 @@ public class MediaCCCSearchExtractorEventsTest {
@Test
public void testReturnTypeStream() throws Exception {
for(InfoItem item : itemsPage.getItems()) {
for (InfoItem item : itemsPage.getItems()) {
assertTrue("Item is not of type StreamInfoItem", item instanceof StreamInfoItem);
}
}

View File

@ -6,96 +6,201 @@ import org.junit.Test;
import org.schabi.newpipe.DownloaderTestImpl;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.services.BaseExtractorTest;
import org.schabi.newpipe.extractor.services.media_ccc.extractors.MediaCCCStreamExtractor;
import org.schabi.newpipe.extractor.stream.StreamExtractor;
import org.schabi.newpipe.extractor.stream.AudioStream;
import org.schabi.newpipe.extractor.stream.VideoStream;
import org.schabi.newpipe.extractor.utils.UtilsTest;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.List;
import static java.util.Objects.requireNonNull;
import static junit.framework.TestCase.assertEquals;
import static org.schabi.newpipe.extractor.ExtractorAsserts.assertIsSecureUrl;
import static org.schabi.newpipe.extractor.ServiceList.MediaCCC;
/**
* Test {@link MediaCCCStreamExtractor}
*/
public class MediaCCCStreamExtractorTest implements BaseExtractorTest {
private static StreamExtractor extractor;
public class MediaCCCStreamExtractorTest {
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
public static class Gpn18Tmux {
private static MediaCCCStreamExtractor extractor;
extractor = MediaCCC.getStreamExtractor("https://api.media.ccc.de/public/events/8afc16c2-d76a-53f6-85e4-90494665835d");
extractor.fetchPage();
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = (MediaCCCStreamExtractor) MediaCCC.getStreamExtractor("https://media.ccc.de/v/gpn18-105-tmux-warum-ein-schwarzes-fenster-am-bildschirm-reicht");
extractor.fetchPage();
}
@Test
public void testServiceId() throws Exception {
assertEquals(2, extractor.getServiceId());
}
@Test
public void testName() throws Exception {
assertEquals("tmux - Warum ein schwarzes Fenster am Bildschirm reicht", extractor.getName());
}
@Test
public void testId() throws Exception {
assertEquals("gpn18-105-tmux-warum-ein-schwarzes-fenster-am-bildschirm-reicht", extractor.getId());
}
@Test
public void testUrl() throws Exception {
assertIsSecureUrl(extractor.getUrl());
assertEquals("https://api.media.ccc.de/public/events/gpn18-105-tmux-warum-ein-schwarzes-fenster-am-bildschirm-reicht", extractor.getUrl());
}
@Test
public void testOriginalUrl() throws Exception {
assertIsSecureUrl(extractor.getOriginalUrl());
assertEquals("https://media.ccc.de/v/gpn18-105-tmux-warum-ein-schwarzes-fenster-am-bildschirm-reicht", extractor.getOriginalUrl());
}
@Test
public void testThumbnail() throws Exception {
assertIsSecureUrl(extractor.getThumbnailUrl());
assertEquals("https://static.media.ccc.de/media/events/gpn/gpn18/105-hd.jpg", extractor.getThumbnailUrl());
}
@Test
public void testUploaderName() throws Exception {
assertEquals("gpn18", extractor.getUploaderName());
}
@Test
public void testUploaderUrl() throws Exception {
assertIsSecureUrl(extractor.getUploaderUrl());
assertEquals("https://api.media.ccc.de/public/conferences/gpn18", extractor.getUploaderUrl());
}
@Test
public void testUploaderAvatarUrl() throws Exception {
assertIsSecureUrl(extractor.getUploaderAvatarUrl());
assertEquals("https://static.media.ccc.de/media/events/gpn/gpn18/logo.png", extractor.getUploaderAvatarUrl());
}
@Test
public void testVideoStreams() throws Exception {
List<VideoStream> videoStreamList = extractor.getVideoStreams();
assertEquals(4, videoStreamList.size());
for (VideoStream stream : videoStreamList) {
assertIsSecureUrl(stream.getUrl());
}
}
@Test
public void testAudioStreams() throws Exception {
List<AudioStream> audioStreamList = extractor.getAudioStreams();
assertEquals(2, audioStreamList.size());
for (AudioStream stream : audioStreamList) {
assertIsSecureUrl(stream.getUrl());
}
}
@Test
public void testGetTextualUploadDate() throws ParsingException {
Assert.assertEquals("2018-05-11T02:00:00.000+02:00", extractor.getTextualUploadDate());
}
@Test
public void testGetUploadDate() throws ParsingException, ParseException {
final Calendar instance = Calendar.getInstance();
instance.setTime(new SimpleDateFormat("yyyy-MM-dd").parse("2018-05-11"));
assertEquals(instance, requireNonNull(extractor.getUploadDate()).date());
}
}
@Override
public void testServiceId() throws Exception {
assertEquals(2, extractor.getServiceId());
}
public static class _36c3PrivacyMessaging {
private static MediaCCCStreamExtractor extractor;
@Override
public void testName() throws Exception {
assertEquals("tmux - Warum ein schwarzes Fenster am Bildschirm reicht", extractor.getName());
}
@BeforeClass
public static void setUpClass() throws Exception {
NewPipe.init(DownloaderTestImpl.getInstance());
extractor = (MediaCCCStreamExtractor) MediaCCC.getStreamExtractor("https://media.ccc.de/v/36c3-10565-what_s_left_for_private_messaging");
extractor.fetchPage();
}
@Override
public void testId() throws Exception {
assertEquals("", extractor.getId());
}
@Test
public void testName() throws Exception {
assertEquals("What's left for private messaging?", extractor.getName());
}
@Override
public void testUrl() throws Exception {
assertEquals("", extractor.getUrl());
}
@Test
public void testId() throws Exception {
assertEquals("36c3-10565-what_s_left_for_private_messaging", extractor.getId());
}
@Override
public void testOriginalUrl() throws Exception {
assertEquals("", extractor.getOriginalUrl());
}
@Test
public void testUrl() throws Exception {
assertIsSecureUrl(extractor.getUrl());
assertEquals("https://api.media.ccc.de/public/events/36c3-10565-what_s_left_for_private_messaging", extractor.getUrl());
}
@Test
public void testThumbnail() throws Exception {
assertEquals("https://static.media.ccc.de/media/events/gpn/gpn18/105-hd.jpg", extractor.getThumbnailUrl());
}
@Test
public void testOriginalUrl() throws Exception {
assertIsSecureUrl(extractor.getOriginalUrl());
assertEquals("https://media.ccc.de/v/36c3-10565-what_s_left_for_private_messaging", extractor.getOriginalUrl());
}
@Test
public void testUploaderName() throws Exception {
assertEquals("gpn18", extractor.getUploaderName());
}
@Test
public void testThumbnail() throws Exception {
assertIsSecureUrl(extractor.getThumbnailUrl());
assertEquals("https://static.media.ccc.de/media/congress/2019/10565-hd.jpg", extractor.getThumbnailUrl());
}
@Test
public void testUploaderUrl() throws Exception {
assertEquals("https://api.media.ccc.de/public/conferences/gpn18", extractor.getUploaderUrl());
}
@Test
public void testUploaderName() throws Exception {
assertEquals("36c3", extractor.getUploaderName());
}
@Test
public void testUploaderAvatarUrl() throws Exception {
assertEquals("https://static.media.ccc.de/media/events/gpn/gpn18/logo.png", extractor.getUploaderAvatarUrl());
}
@Test
public void testUploaderUrl() throws Exception {
assertIsSecureUrl(extractor.getUploaderUrl());
assertEquals("https://api.media.ccc.de/public/conferences/36c3", extractor.getUploaderUrl());
}
@Test
public void testVideoStreams() throws Exception {
assertEquals(4, extractor.getVideoStreams().size());
}
@Test
public void testUploaderAvatarUrl() throws Exception {
assertIsSecureUrl(extractor.getUploaderAvatarUrl());
assertEquals("https://static.media.ccc.de/media/congress/2019/logo.png", extractor.getUploaderAvatarUrl());
}
@Test
public void testAudioStreams() throws Exception {
assertEquals(2, extractor.getAudioStreams().size());
}
@Test
public void testVideoStreams() throws Exception {
List<VideoStream> videoStreamList = extractor.getVideoStreams();
assertEquals(8, videoStreamList.size());
for (VideoStream stream : videoStreamList) {
assertIsSecureUrl(stream.getUrl());
}
}
@Test
public void testGetTextualUploadDate() throws ParsingException {
Assert.assertEquals("2018-05-11T02:00:00.000+02:00", extractor.getTextualUploadDate());
}
@Test
public void testAudioStreams() throws Exception {
List<AudioStream> audioStreamList = extractor.getAudioStreams();
assertEquals(2, audioStreamList.size());
for (AudioStream stream : audioStreamList) {
assertIsSecureUrl(stream.getUrl());
}
}
@Test
public void testGetUploadDate() throws ParsingException, ParseException {
final Calendar instance = Calendar.getInstance();
instance.setTime(new SimpleDateFormat("yyyy-MM-dd").parse("2018-05-11"));
assertEquals(instance, requireNonNull(extractor.getUploadDate()).date());
@Test
public void testGetTextualUploadDate() throws ParsingException {
Assert.assertEquals("2020-01-11T01:00:00.000+01:00", extractor.getTextualUploadDate());
}
@Test
public void testGetUploadDate() throws ParsingException, ParseException {
final Calendar instance = Calendar.getInstance();
instance.setTime(new SimpleDateFormat("yyyy-MM-dd").parse("2020-01-11"));
assertEquals(instance, requireNonNull(extractor.getUploadDate()).date());
}
}
}
}

Some files were not shown because too many files have changed in this diff.