I'm trying to figure out what I'm doing wrong when broadcasting live. My Android app streams from my phone to YouTube. Streaming works fine the first time, but if I stop for about 2-3 minutes and then stream again, the lifeCycleStatus of the broadcast stays at "liveStarting" and my video is not visible to the audience. If I stop for long enough and restart the stream, the status goes to "live" in about 10 seconds. I have enableMonitorStream disabled, and I also tried LiveBroadcasts.Transition, but it returns a redundantTransition error and the lifeCycleStatus still doesn't change to "live". I also tried creating a new live broadcast and live stream and changing the status to "live" manually, but both get stuck at "liveStarting".
public class YoutubeService {
private WeakReference<Context> context;
private static final String PREF_ACCOUNT_NAME = "youtube_account_name";
private static final String[] SCOPES = {YouTubeScopes.YOUTUBE};
private GoogleAccountCredential mCredential;
private YouTube mService;
public YoutubeService(Context context) {
this.context = new WeakReference<Context>(context);
// create account credential
mCredential = GoogleAccountCredential.usingOAuth2(
context, Arrays.asList(SCOPES))
.setBackOff(new ExponentialBackOff());
mCredential.setSelectedAccountName("xxxx@gmail.com");
// create youtube builder
HttpTransport transport = AndroidHttp.newCompatibleTransport();
JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
mService = new YouTube.Builder(
transport, jsonFactory, mCredential)
.setApplicationName("My App Name")
.build();
}
// runs inside an AsyncTask<Void, Void, Map<String, String>>
private void getRtmpUrl() {
try {
// get livebroadcast list
YouTube.LiveBroadcasts.List liveBroadcastRequest = mService.liveBroadcasts().list("id,snippet,contentDetails,status");
liveBroadcastRequest.setBroadcastType("persistent");
liveBroadcastRequest.setMine(true);
LiveBroadcastListResponse liveBroadcastListResponse = liveBroadcastRequest.execute();
List<LiveBroadcast> liveBroadcastList = liveBroadcastListResponse.getItems();
if (liveBroadcastList != null && liveBroadcastList.size() > 0) {
LiveBroadcast liveBroadcast = liveBroadcastList.get(0);
String streamId = liveBroadcast.getContentDetails().getBoundStreamId();
// get livestream list
YouTube.LiveStreams.List livestreamRequest = mService.liveStreams().list("id,cdn");
livestreamRequest.setId(streamId);
LiveStreamListResponse liveStreamListResponse = livestreamRequest.execute();
List<LiveStream> liveStreamList = liveStreamListResponse.getItems();
if (liveStreamList != null && liveStreamList.size() > 0) {
LiveStream liveStream = liveStreamList.get(0);
String serverUrl = liveStream.getCdn().getIngestionInfo().getIngestionAddress();
String streamName = liveStream.getCdn().getIngestionInfo().getStreamName();
String rtmpUrl = serverUrl + "/" + streamName;
// use this rtmpUrl for streaming
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
// called about 30 seconds after pressing start streaming
private void checkStatus() {
try {
// get livebroadcast list
YouTube.LiveBroadcasts.List liveBroadcastRequest = mService.liveBroadcasts().list("id,snippet,contentDetails,status");
liveBroadcastRequest.setBroadcastType("persistent");
liveBroadcastRequest.setMine(true);
LiveBroadcastListResponse liveBroadcastListResponse = liveBroadcastRequest.execute();
List<LiveBroadcast> liveBroadcastList = liveBroadcastListResponse.getItems();
if (liveBroadcastList != null && liveBroadcastList.size() > 0) {
LiveBroadcast liveBroadcast = liveBroadcastList.get(0);
// get lifeCycleStatus
String lifeCycleStatus = liveBroadcast.getStatus().getLifeCycleStatus();
String recordingStatus = liveBroadcast.getStatus().getRecordingStatus();
if (lifeCycleStatus != null && lifeCycleStatus.equalsIgnoreCase("live") && recordingStatus != null && recordingStatus.equalsIgnoreCase("recording")) {
String videoId = liveBroadcast.getId();
// the url to watch is www.youtube.com/watch?v=videoId
// video is visible to audience
} else {
// the status is stuck at 'liveStarting', video is not visible to audience
// "status":{"lifeCycleStatus":"liveStarting","privacyStatus":"public","recordingStatus":"recording"}
// check the status of livestream
String boundStreamId = liveBroadcast.getContentDetails().getBoundStreamId();
YouTube.LiveStreams.List livestreamRequest = mService.liveStreams().list("id,cdn,status");
livestreamRequest.setId(boundStreamId);
LiveStreamListResponse liveStreamListResponse = livestreamRequest.execute();
List<LiveStream> liveStreamList = liveStreamListResponse.getItems();
if (liveStreamList != null && liveStreamList.size() > 0) {
LiveStream liveStream = liveStreamList.get(0);
String streamStatus = liveStream.getStatus().getStreamStatus();
if (streamStatus.equalsIgnoreCase("active")) {
// Log.e(TAG,"need to transite to live, liveBroadcastId = " + liveBroadcast.getId());
YouTube.LiveBroadcasts.Transition liveBroadcastTransitionRequest =
mService.liveBroadcasts().transition("live", liveBroadcast.getId(), "id,status");
LiveBroadcast liveBroadcastTransitionResponse = liveBroadcastTransitionRequest.execute();
// get error here
// error 403 Forbidden
// {
// "code" : 403,
// "errors" : [ {
// "domain" : "youtube.liveBroadcast",
// "message" : "Redundant transition",
// "reason" : "redundantTransition"
// } ],
// "message" : "Redundant transition"
// }
}
}
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
// also tried creating a live broadcast and live stream instead of using the default ones
private void createLiveBroadcast() {
try {
// step 1 create live broadcast
LiveBroadcastSnippet broadcastSnippet = new LiveBroadcastSnippet();
broadcastSnippet.setTitle("Test live broadcast title");
Date currentTime = Calendar.getInstance().getTime();
broadcastSnippet.setScheduledStartTime(new DateTime(currentTime));
LiveBroadcastStatus status = new LiveBroadcastStatus();
status.setPrivacyStatus("public");
// disable MonitorStreamInfo to change transition from ready to live
// reference https://stackoverflow.com/questions/35003786/cannot-make-transition-of-my-youtube-broadcast-to-live-using-youtube-api
LiveBroadcastContentDetails contentDetails = new LiveBroadcastContentDetails();
MonitorStreamInfo monitorStream = new MonitorStreamInfo();
monitorStream.setEnableMonitorStream(false);
contentDetails.setMonitorStream(monitorStream);
LiveBroadcast broadcast = new LiveBroadcast();
broadcast.setKind("youtube#liveBroadcast");
broadcast.setSnippet(broadcastSnippet);
broadcast.setStatus(status);
broadcast.setContentDetails(contentDetails);
YouTube.LiveBroadcasts.Insert liveBroadcastInsert =
mService.liveBroadcasts().insert("snippet,contentDetails,status", broadcast);
LiveBroadcast returnedBroadcast = liveBroadcastInsert.execute();
// step 2 create live stream
String streamTitle = "Test Live Stream title";
LiveStreamSnippet streamSnippet = new LiveStreamSnippet();
streamSnippet.setTitle(streamTitle);
CdnSettings cdnSettings = new CdnSettings();
cdnSettings.setFormat("720p");
cdnSettings.setIngestionType("rtmp");
LiveStream stream = new LiveStream();
stream.setKind("youtube#liveStream");
stream.setSnippet(streamSnippet);
stream.setCdn(cdnSettings);
YouTube.LiveStreams.Insert liveStreamInsert =
mService.liveStreams().insert("snippet,cdn", stream);
LiveStream returnedStream = liveStreamInsert.execute();
if (returnedStream != null) {
YouTube.LiveBroadcasts.Bind liveBroadcastBind =
mService.liveBroadcasts().bind(returnedBroadcast.getId(), "id,snippet,contentDetails,status");
liveBroadcastBind.setStreamId(returnedStream.getId());
returnedBroadcast = liveBroadcastBind.execute();
String serverUrl = returnedStream.getCdn().getIngestionInfo().getIngestionAddress();
String streamName = returnedStream.getCdn().getIngestionInfo().getStreamName();
String rtmpUrl = serverUrl + "/" + streamName;
// use this rtmpUrl for streaming
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
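For reference, here is the polling/transition flow I am describing, reduced to a minimal sketch against the same client classes used above. The 30-attempt budget and the 5-second interval are arbitrary values for illustration; it assumes mService is the authorized client from the constructor and that GoogleJsonResponseException (from the google-api-client library) is imported.
// Minimal sketch: poll the bound stream until it reports "active", request the
// transition to "live", and keep polling until the broadcast's lifeCycleStatus is "live".
private boolean waitForLive(String broadcastId, String streamId) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < 30; attempt++) { // arbitrary retry budget
        LiveStreamListResponse streams = mService.liveStreams()
                .list("id,status").setId(streamId).execute();
        String streamStatus = streams.getItems().get(0).getStatus().getStreamStatus();
        if ("active".equalsIgnoreCase(streamStatus)) {
            try {
                mService.liveBroadcasts().transition("live", broadcastId, "id,status").execute();
            } catch (GoogleJsonResponseException e) {
                // a "redundantTransition" error here means the transition was already requested;
                // keep polling instead of treating it as fatal
            }
        }
        LiveBroadcastListResponse broadcasts = mService.liveBroadcasts()
                .list("id,status").setId(broadcastId).execute();
        String lifeCycleStatus = broadcasts.getItems().get(0).getStatus().getLifeCycleStatus();
        if ("live".equalsIgnoreCase(lifeCycleStatus)) {
            return true;
        }
        Thread.sleep(5000); // arbitrary 5-second poll interval
    }
    return false;
}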
I'm currently working on a project that has to rely heavily on MQTT. One of the parts that needs to use MQTT is an ASP.NET API, but I'm having difficulties receiving messages.
Here is my MQTTHandler:
public MQTTHandler()
{
_mqttUrl = Properties.Resources.mqttURL ?? "";
_mqttPort = Properties.Resources.mqttPort ?? "";
_mqttUsername = Properties.Resources.mqttUsername ?? "";
_mqttPassword = Properties.Resources.mqttPassword ?? "";
_mqttFactory = new MqttFactory();
_tls = false;
}
public async Task<IManagedMqttClient> ConnectClientAsync()
{
var clientID = Guid.NewGuid().ToString();
var messageBuilder = new MqttClientOptionsBuilder()
.WithClientId(clientID)
.WithCredentials(_mqttUsername, _mqttPassword)
.WithTcpServer(_mqttUrl, Convert.ToInt32(_mqttPort));
var options = _tls ? messageBuilder.WithTls().Build() : messageBuilder.Build();
var managedOptions = new ManagedMqttClientOptionsBuilder()
.WithAutoReconnectDelay(TimeSpan.FromSeconds(5))
.WithClientOptions(options)
.Build();
_mqttClient = new MqttFactory().CreateManagedMqttClient();
await _mqttClient.StartAsync(managedOptions);
Console.WriteLine("Klient startet");
return _mqttClient;
}
public async Task PublishAsync(string topic, string payload, bool retainFlag = true, int qos = 1)
{
await _mqttClient.EnqueueAsync(new MqttApplicationMessageBuilder()
.WithTopic(topic)
.WithPayload(payload)
.WithQualityOfServiceLevel((MQTTnet.Protocol.MqttQualityOfServiceLevel)qos)
.WithRetainFlag(retainFlag)
.Build());
Console.WriteLine("Besked published");
}
public async Task SubscribeAsync(string topic, int qos = 1)
{
var topicFilters = new List<MQTTnet.Packets.MqttTopicFilter>
{
new MqttTopicFilterBuilder()
.WithTopic(topic)
.WithQualityOfServiceLevel((MQTTnet.Protocol.MqttQualityOfServiceLevel)(qos))
.Build()
};
await _mqttClient.SubscribeAsync(topicFilters);
}
public Status GetSystemStatus(MqttApplicationMessageReceivedEventArgs e)
{
try
{
var json = Encoding.UTF8.GetString(e.ApplicationMessage.Payload);
var status = JsonSerializer.Deserialize<Status>(json);
if (status != null)
{
return status;
}
else
{
return null;
}
}
catch (Exception)
{
throw;
}
}
The above has been tested with a console app and works as it should.
The reason I need MQTT in the API is that a POST method has to act on the value of a topic.
In particular, I need to check a system's status before allowing the POST:
[HttpPost]
public async Task<ActionResult<Order>> PostOrder(Order order)
{
if (_lastStatus != null)
{
if (_lastStatus.OpStatus)
{
return StatusCode(400, "System is busy!");
}
else
{
var response = await _orderManager.AddOrder(order);
return StatusCode(response.StatusCode, response.Message);
}
}
return StatusCode(400, "Something went wrong");
}
So I will need to set up a subscriber for this controller, and set the value of _lastStatus on received messages:
private readonly MQTTHandler _mqttHandler;
private IManagedMqttClient _mqttClient;
private Status _lastStatus;
public OrdersController(OrderManager orderManager)
{
_orderManager = orderManager;
_mqttHandler = new MQTTHandler();
_mqttClient = _mqttHandler.ConnectClientAsync().Result;
_mqttHandler.SubscribeAsync("JSON/Status");
_mqttClient.ApplicationMessageReceivedAsync += e =>
{
_lastStatus = _mqttHandler.GetSystemStatus(e);
return Task.CompletedTask;
};
}
However, it's behaving a little oddly and I'm not experienced enough to know why.
The first time I make a POST request, _lastStatus is null; every following POST request seems to have the last retained message.
I'm guessing that I'm struggling because this is all asynchronous, but I'm not sure, and every attempt I've made to make it synchronous has failed.
Does anyone have a clue about what I'm doing wrong?
I am trying to break down the WebRTC signaling process using the PeerConnection States Demo by copying the signaling strings manually, as in this YouTube video: https://www.youtube.com/watch?v=YLPRBYTeoF4&t=1594s.
GitHub: https://github.com/chrisuehlinger/serverless-webrtc (from 25:00)
If I get this working, I will be able to troubleshoot any signaling problems with PubNub, Firebase, or any other signaling solution I may choose.
I have the buttons Start, Call, Exchange and Hang up.
The Exchange button shows an InteractionDialog with the buttons Copy Offer, Paste Offer, Set Offer, Copy Answer, Paste Answer and Set Answer, plus 'Copy Offer 2 from text file' and 'Copy Answer 2 from text file'.
If you click the first set of dialog buttons in that order (excluding the last two file-selection buttons), the negotiation completes as in the original demo.
But I want it to work between two devices, so I put the offer string from device A into a text file and paste it into the text field on device B using 'Copy Offer 2 from text file', then click Set Offer to generate an answer, which I copy into a text file and send back to device A using 'Copy Answer 2 from text file'.
I have not been able to put my finger on what I am missing. Any help is appreciated.
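Before the code, one detail I suspect matters: in the demo, every ICE candidate a peer connection emits is added directly to the other local peer connection inside onIceCandidate, so with manual copy-paste between two devices those candidates never travel anywhere. The following is only a hypothetical sketch of collecting them as text so they could be sent along with the SDP (it reuses the getCandidate() calls already in the code); applying them on the other device would still need whatever candidate-construction API the library provides.
// Hypothetical helper: gather a peer's ICE candidates as plain text so they could be
// copied to the other device together with the SDP. A null candidate marks the end of gathering.
private final StringBuilder candidateLog = new StringBuilder();
private void collectIceCandidate(RTCPeerConnectionIceEvent event) {
    if (event.getCandidate() != null) {
        candidateLog.append(event.getCandidate().getCandidate()).append("\n");
    } else {
        // end of gathering: candidateLog now holds everything the remote device
        // would need to receive out of band (e.g. pasted into another text area)
        Log.p("ICE gathering complete:\n" + candidateLog);
    }
}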
public class NewClass extends Form implements AutoCloseable {
private RTCVideoElement video1, video2;
private Button startButton = new Button("Start"),
callButton = new Button("Call"),
hangupButton = new Button("Hang up"),
sendOffer = new Button("Exchange"),
sendAnswer = new Button("Send Answer"),
setOffer = new Button("Set Offer"),
setAnswer = new Button("Set Answer"),
copyOffer = new Button("copy Offer"),
pasteOffer = new Button("paste Offer"),
copyAnswer = new Button("copy Answer"),
pasteAnswer = new Button("paste Answer"),
copyOffer2 = new Button("copy Offer 2- from text file"),
setOffer2 = new Button("set Offer 2"),
copyAnswer2 = new Button("copy Answer 2- from text file"),
setAnswer2 = new Button("set Answer 2");
private TextArea toffer1 = new TextArea("", 5, 7, TextArea.ANY),
toffer2 = new TextArea("", 5, 7, TextArea.ANY),
tAnswer1 = new TextArea("", 5, 7, TextArea.ANY),
tAnswer2 = new TextArea("", 5, 7, TextArea.ANY);
{
startButton.setEnabled(true);
callButton.setEnabled(false);
hangupButton.setEnabled(false);
startButton.addActionListener(evt->start());
callButton.addActionListener(evt->call());
hangupButton.addActionListener(evt->hangup());
sendOffer.addActionListener(evt->sendOffer());
// sendAnswer.addActionListener(evt->sendAnswer());
setOffer.addActionListener(evt->setOffer());
setAnswer.addActionListener(evt->setAnswer());
copyOffer.addActionListener(evt->{ Display.getInstance().copyToClipboard(toffer1.getText()); });
pasteOffer.addActionListener(evt->{ toffer2.setText((String)Display.getInstance().getPasteDataFromClipboard()); });
copyAnswer.addActionListener(evt->{ Display.getInstance().copyToClipboard(tAnswer1.getText()); });
pasteAnswer.addActionListener(evt->{ tAnswer2.setText((String)Display.getInstance().getPasteDataFromClipboard()); });
copyOffer2.addActionListener(evt->{
if (FileChooser.isAvailable()) {
FileChooser.showOpenDialog(".xls, .csv, text/plain", e2-> {
String file = (String)e2.getSource();
if (file == null) {
// hi.add("No file was selected");
// hi.revalidate();
} else {
String extension = null;
if (file.lastIndexOf(".") > 0) {
extension = file.substring(file.lastIndexOf(".")+1);
}
if ("txt".equals(extension)) {
FileSystemStorage fs = FileSystemStorage.getInstance();
try {
InputStream fis = fs.openInputStream(file);
// hi.addComponent(new SpanLabel(Util.readToString(fis)));
toffer1.setText(Util.readToString(fis));
} catch (Exception ex) {
Log.e(ex);
}
} else {
// hi.add("Selected file "+file);
}
}
//hi.revalidate();
});
}
});
copyAnswer2.addActionListener(evt->{
if (FileChooser.isAvailable()) {
FileChooser.showOpenDialog("text/plain", e2-> {
String file = (String)e2.getSource();
if (file == null) {
// hi.add("No file was selected");
// hi.revalidate();
} else {
String extension = null;
if (file.lastIndexOf(".") > 0) {
extension = file.substring(file.lastIndexOf(".")+1);
}
if ("txt".equals(extension)) {
FileSystemStorage fs = FileSystemStorage.getInstance();
try {
InputStream fis = fs.openInputStream(file);
// hi.addComponent(new SpanLabel(Util.readToString(fis)));
tAnswer1.setText(Util.readToString(fis));
} catch (Exception ex) {
Log.e(ex);
}
} else {
// hi.add("Selected file "+file);
}
}
//hi.revalidate();
});
}
});
}
InteractionDialog dlg = new InteractionDialog(" - - -");
{dlg.setLayout(new BoxLayout(BoxLayout.Y_AXIS));
dlg.setScrollable(true);
dlg.setDisposeWhenPointerOutOfBounds(true);
dlg.add(" the offer - copy this offer ");dlg.add(copyOffer);dlg.add(copyOffer2);
dlg.addComponent(toffer1); dlg.add("===============================");
dlg.add(" paste offer here ");dlg.add(pasteOffer);
dlg.addComponent(toffer2);
dlg.addComponent(setOffer);
dlg.add("===============================");
dlg.add(" copy this answer ");dlg.add(copyAnswer);dlg.add(copyAnswer2);
dlg.addComponent(tAnswer1);
dlg.add("===============================");
dlg.add("paste answer here");dlg.add(pasteAnswer);
dlg.addComponent(tAnswer2);
dlg.addComponent(setAnswer);
}
private void sendOffer() {
//= call
Dimension pre = dlg.getContentPane().getPreferredSize();
dlg.show(Display.getInstance().getDisplayHeight()/16,0, Display.getInstance().getDisplayWidth()/16, 0);
} /*
private void sendAnswer() {
}*/
private Date startTime;
private MediaStream localStream;
private RTCPeerConnection pc1, pc2;
private SpanLabel pc1StateDiv = new SpanLabel();
private SpanLabel pc2StateDiv = new SpanLabel();
private SpanLabel pc1IceStateDiv = new SpanLabel();
private SpanLabel pc2IceStateDiv = new SpanLabel();
private SpanLabel pc1ConnStateDiv = new SpanLabel();
private SpanLabel pc2ConnStateDiv = new SpanLabel();
private static final RTCOfferOptions offerOptions = new RTCOfferOptions()
.offerToReceiveAudio(true)
.offerToReceiveVideo(true);
private RTC rtc;
public NewClass() {
super("Peer Connection Demo", new BorderLayout());
Container center = new Container(BoxLayout.y());
Container videoCnt = new Container(new BorderLayout());
String intro = "This sample was adapted from the \"PeerConnection: States Demo\" on the WebRTC web site.";
Button viewSource = new Button("View Source");
FontImage.setMaterialIcon(viewSource, FontImage.MATERIAL_LINK);
viewSource.addActionListener(evt->CN.execute("https://github.com/shannah/CN1WebRTC/blob/master/src/com/codename1/webrtc/demos/PeerConnectionStatesDemo.java"));
add(BorderLayout.NORTH, BoxLayout.encloseY(new SpanLabel(intro), viewSource));
//,toffer1,tAnswer1,setAnswer,toffer2,setOffer,tAnswer2
videoCnt.add(BorderLayout.SOUTH, FlowLayout.encloseCenter(startButton, callButton,sendOffer, hangupButton));
videoCnt.setPreferredH(CN.getDisplayHeight()/2);
center.add(videoCnt);
center.add("PC1 state:").
add(pc1StateDiv).
add("PC1 ICE state:").
add(pc1IceStateDiv).
add("PC1 connection state:").
add(pc1ConnStateDiv).
add("PC2 state:").
add(pc2StateDiv).
add("PC2 ICE state:").
add(pc2IceStateDiv).
add("PC2 connection state:").
add(pc2ConnStateDiv).
add(new SpanLabel("View the console to see logging. The MediaStream object localStream, and the RTCPeerConnection objects localPeerConnection and remotePeerConnection are in global scope, so you can inspect them in the console as well."));
center.setScrollableY(true);
add(BorderLayout.CENTER, center);
RTC.createRTC().onSuccess(r->{
rtc = r;
video1 = rtc.createVideo();
video1.setAutoplay(true);
video1.setMuted(true);
video1.applyStyle("position:fixed;width:50%;height:100%;top:0;left:0;bottom:0;");
video2 = rtc.createVideo();
video2.setAutoplay(true);
video2.applyStyle("position:fixed;width:50%;height:100%;top:0;right:0;bottom:0;");
rtc.append(video1);
rtc.append(video2);
video1.onloadedmetadata(evt->{
System.out.println("Local video videoWidth: "+video1.getVideoWidth()+"px, videoHeight: "+video1.getVideoHeight()+"px");
});
video2.onloadedmetadata(evt->{
System.out.println("Remote video size changed to "+video2.getVideoWidth()+"x"+video2.getVideoHeight());
if (startTime != null) {
long elapsedTime = System.currentTimeMillis() - startTime.getTime();
System.out.println("Setup time: "+elapsedTime+"ms");
startTime = null;
}
});
videoCnt.add(BorderLayout.CENTER, rtc.getVideoComponent());
revalidateWithAnimationSafety();
});
}
private void gotStream(MediaStream stream) {
Log.p("Received local stream");
video1.setSrcObject(stream);
localStream = stream;
stream.retain();
callButton.setEnabled(true);
}
private void start() {
Log.p("Requesting local stream");
startButton.setEnabled(false);
rtc.getUserMedia(new MediaStreamConstraints().audio(true).video(true)).onSuccess(stream->{
gotStream(stream);
}).onFail(t->{
Log.e((Throwable)t);
Dialog.show("Error", "getUserMedia() error: "+((Throwable)t).getMessage(), "OK", null);
});
}
private void call() {
callButton.setEnabled(false);
hangupButton.setEnabled(true);
Log.p("Starting call");
startTime = new Date();
MediaStreamTracks videoTracks = localStream.getVideoTracks();
MediaStreamTracks audioTracks = localStream.getAudioTracks();
if (videoTracks.size() > 0) {
Log.p("Using video device "+videoTracks.get(0).getLabel());
}
if (audioTracks.size() > 0) {
Log.p("Using audio device "+audioTracks.get(0).getLabel());
}
RTCConfiguration servers = new RTCConfiguration();
pc1 = rtc.newRTCPeerConnection(servers);
Log.p("Created local peer connection object pc1");
pc1StateDiv.setText(pc1.getSignalingState()+"");
pc1.onsignalingstatechange(evt->stateCallback1());
pc1IceStateDiv.setText(pc1.getIceConnectionState()+"");
pc1.oniceconnectionstatechange(evt->iceStateCallback1())
.onconnectionstatechange(evt->connStateCallback1())
.onicecandidate(e->onIceCandidate(pc1, e));
/* */
pc2 = rtc.newRTCPeerConnection(servers);
Log.p("Created remote peer connection object pc2");
pc2StateDiv.setText(pc2.getSignalingState()+"");
pc2.onsignalingstatechange(evt->stateCallback2());
pc2IceStateDiv.setText(pc2.getIceConnectionState()+"");
pc2.oniceconnectionstatechange(evt->iceStateCallback2())
.onconnectionstatechange(evt->connStateCallback2())
.onicecandidate(evt->{
onIceCandidate(pc2, evt);
});
pc2.ontrack(evt->gotRemoteStream(evt));
for (MediaStreamTrack track : localStream.getTracks()) {
pc1.addTrack(track, localStream);
}
Log.p("Adding local stream to peer connection");
/* */
pc1.createOffer(offerOptions).onSuccess(offer->{
gotDescription1(offer);
})
.onFail(e-> {
onCreateSessionDescriptionError((Throwable)e);
});
}
private void onCreateSessionDescriptionError(Throwable e) {
Log.p("Failed to create session description: "+e.getMessage(), Log.ERROR);
}
private void gotDescription1(RTCSessionDescription description) {
pc1.setLocalDescription(description);
Log.p("Offer from pc1:\n"+description.getSdp());
sendOffer(description.getSdp());
}
private void sendOffer(String jsDesc) {
toffer1.setText(jsDesc);
// getOffer(jsDesc);
}
private void setOffer() {
String desc = toffer2.getText();
if(desc.endsWith("\n")){getOffer(desc);}else{getOffer(desc+"\n");}
}
private void getOffer(String jsDesc) {
RTCSessionDescription description = rtc.createSessionDescription(RTCSessionDescription.RTCSdpType.Offer, jsDesc);
pc2.setRemoteDescription(description).onSuccess(ef->{
})
.onFail(e->onCreateSessionDescriptionError((Throwable)e));
pc2.createAnswer()
.onSuccess(desc->{ sendAnswer(desc.getSdp()); } )
.onFail(e->onCreateSessionDescriptionError((Throwable)e));
}
private void sendAnswer(String jsDesc) {
tAnswer1.setText(jsDesc);
// getAnswer(jsDesc);
}
private void setAnswer() {
String desc = tAnswer2.getText();
//getAnswer(desc+"\n");
//getAnswer(desc);
if(desc.endsWith("\n")){getAnswer(desc);}else{getAnswer(desc+"\n");}
}
private void getAnswer(String jsDesc) {
RTCSessionDescription description = rtc.createSessionDescription(RTCSessionDescription.RTCSdpType.Answer, jsDesc);
gotDescription2(description);
}
private void gotDescription2(RTCSessionDescription description) {
pc2.setLocalDescription(description);
Log.p("Answer from pc2\n"+description.getSdp());
pc1.setRemoteDescription(description);
}
private void hangup() {
Log.p("Ending call");
pc1.close();
pc2.close();
pc1StateDiv.setText(pc1StateDiv.getText() + pc1.getSignalingState());
pc2StateDiv.setText(pc2StateDiv.getText() + pc2.getSignalingState());
pc1.release();
pc1 = null;
pc2.release();
pc2 = null;
hangupButton.setEnabled(false);
callButton.setEnabled(true);
}
private void gotRemoteStream(RTCTrackEvent e) {
if (video2.getSrcObject() != e.getStreams().get(0)) {
video2.setSrcObject(e.getStreams().get(0));
Log.p("Got remote stream");
}
}
private void stateCallback1() {
if (pc1 != null) {
Log.p("pc1 state change callback, state: "+pc1.getSignalingState());
pc1StateDiv.setText(pc1StateDiv.getText() + pc1.getSignalingState());
}
}
private void stateCallback2() {
if (pc2 != null) {
Log.p("pc2 state change callback, state: "+pc2.getSignalingState());
pc2StateDiv.setText(pc2StateDiv.getText() + pc2.getSignalingState());
}
}
private void iceStateCallback1() {
if (pc1 != null) {
Log.p("pc1 ICE connection state change callback, state: "+pc1.getIceConnectionState());
pc1IceStateDiv.setText(pc1IceStateDiv.getText() + pc1.getIceConnectionState());
}
}
private void iceStateCallback2() {
if (pc2 != null) {
Log.p("pc2 ICE connection state change callback, state: "+pc2.getIceConnectionState());
pc2IceStateDiv.setText(pc2IceStateDiv.getText() + pc2.getIceConnectionState());
}
}
private void connStateCallback1() {
if (pc1 != null) {
Log.p("pc1 connection state change callback, state: "+pc1.getConnectionState());
pc1ConnStateDiv.setText(pc1ConnStateDiv.getText() + pc1.getConnectionState());
}
}
private void connStateCallback2() {
if (pc2 != null) {
Log.p("pc2 connection state change callback, state: "+pc2.getConnectionState());
pc2ConnStateDiv.setText(pc2ConnStateDiv.getText() + pc2.getConnectionState());
}
}
private RTCPeerConnection getOtherPc(RTCPeerConnection pc) {
return pc == pc1 ? pc2 : pc1;
}
private String getName(RTCPeerConnection pc) {
return pc == pc1 ? "pc1" : "pc2";
}
private void onIceCandidate(RTCPeerConnection pc, RTCPeerConnectionIceEvent event) {
getOtherPc(pc).addIceCandidate(event.getCandidate())
.onSuccess(res->onAddIceCandidateSuccess(pc))
.onFail(e->{
Log.e((Throwable)e);
onAddIceCandidateError(pc, (Throwable)e);
}).onComplete(e->{
Log.p(getName(pc)+" ICE candidate:\n"+(event.getCandidate() != null ? event.getCandidate().getCandidate() : "(null)"));
});
}
private void onAddIceCandidateSuccess(RTCPeerConnection pc) {
Log.p(getName(pc)+" addIceCandidate success");
}
private void onAddIceCandidateError(RTCPeerConnection pc, Throwable error) {
Log.p(getName(pc)+" failed to add ICE Candidate: "+error.getMessage(), Log.ERROR);
}
@Override
public void close() throws Exception {
if (rtc != null) {
rtc.close();
rtc = null;
}
if (pc1 != null) {
pc1.release();
pc1 = null;
}
if (pc2 != null) {
pc2.release();
pc2 = null;
}
}
}
The call is initiated from the phone (device A) to the simulator (device B).
On device A (my phone):
PC1 state:
nullHaveLocalOfferStable
PC1 ICE state:
New
PC1 connection state:
PC2 state:
nullHaveRemoteOffer
PC2 ICE state:
New
PC2 connection state:
On device B (the PC simulator):
PC1 state:
nullHaveLocalOfferStable
PC1 ICE state:
NewCheckingConnected
PC1 connection state:
Connecting
PC2 state:
nullHaveRemoteOfferStable
PC2 ICE state:
NewChecking
PC2 connection state:
Connecting
There is an error message:
java.lang.RuntimeException: Failed to execute 'addIceCandidate' on 'RTCPeerConnection': Error processing ICE candidate
But the final 3 lines on the console are
[EDT] 0:20:48,103 - pc2 addIceCandidate success
[EDT] 0:20:48,103 - pc2 ICE candidate:
candidate:1503035259 1 udp 7935 154.127.57.220 50066 typ relay raddr 129.205.113.2 rport 6591 generation 0 ufrag lXG+ network-cost 999
My steps are:
On A:
1. Start button
2. Call button (creates the offer)
3. Copy the offer from toffer1 (using the Exchange button)
4. Paste it into toffer2
5. Set Offer button
On B:
6. Start button
7. Call button
8. Paste A's offer into the TextAreas toffer1 and toffer2 (using the Exchange button)
9. Set Offer button
10. Copy the answer from tAnswer1
11. Paste the answer into tAnswer2
On A:
12. Paste the answer into tAnswer1 and tAnswer2
13. Set Answer button
On B:
14. Set Answer button
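A note on reading the state readouts above: the demo appends each new state to the existing label text with no separator, which is why they show up as runs like nullHaveLocalOfferStable. A small helper along these lines (a hypothetical addition that only uses the SpanLabel and getter calls already in the class) makes the sequence easier to follow.
// Hypothetical formatting helper: append each state change with an arrow so the label
// reads "null -> HaveLocalOffer -> Stable" instead of "nullHaveLocalOfferStable".
private void appendState(SpanLabel target, Object state) {
    String current = target.getText();
    target.setText(current == null || current.length() == 0 ? "" + state : current + " -> " + state);
}
// usage inside the existing callbacks, e.g. appendState(pc1StateDiv, pc1.getSignalingState());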
I am trying to create an Azure Function that is triggered from a Logic App.
The function's purpose is to crawl certain websites, extract the desired information, and compare it against a SQL Server database in Azure to check whether we already have that information; if not, it is added.
My issue is that whenever I run it I get an HTTP 500 error. I assume it's the database access that causes it. Help?
public static async Task<IActionResult> Run(
[HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req, ILogger log
)
{
log.LogInformation("C# HTTP trigger function processed a request.");
string RequestBody = await new StreamReader(req.Body).ReadToEndAsync();
{
return await CrawlBlog(0, RequestBody);
}
}
private static async Task<IActionResult> CrawlBlog(int Picker, string req)
{
int BlogPicker = Picker;
string TheResult = req;
//Get the url we want to test
var Url = "";
if (BlogPicker == 0)
{
Url = "*********";
}
else if (BlogPicker == 1)
{
Url = "*********";
}
/*
else if (BlogPicker == 2)
{
Url = "https://azure.microsoft.com/en-in/blog/?utm_source=devglan";
}
*/
else
{
TheResult = "False we got a wrong pick";
return (ActionResult)new OkObjectResult
( new {TheResult });
}
var httpClient = new HttpClient();
var html = await httpClient.GetStringAsync(Url);
var htmlDocument = new HtmlDocument();
htmlDocument.LoadHtml(html);
// a list to hold all available blogs we found
var Blog = new List<BlogStats>();
switch (BlogPicker)
{
case 0:
{
var divs =
htmlDocument.DocumentNode.Descendants("div")
.Where(node => node.GetAttributeValue("class", "").Equals("home_blog_sec_text")).ToList();
foreach (var divo in divs)
{
var Blogo = new BlogStats
{
Summary = divo.Descendants("p").FirstOrDefault().InnerText,
Link = divo.Descendants("a").FirstOrDefault().ChildAttributes("href").FirstOrDefault().Value,
Title = divo.Descendants("a").FirstOrDefault().InnerText
};
Blog.Add(Blogo);
}
break;
}
case 1:
{
var divs =
htmlDocument.DocumentNode.Descendants("div")
.Where(node => node.GetAttributeValue("class", "").Equals("post_header_title two_third last")).ToList();
foreach (var divo in divs)
{
//string TheSummary = "we goofed";
var ThePs = divo.Descendants("p").ToList();
var Blogo = new BlogStats
{
Summary = ThePs[1].GetDirectInnerText(),
Link = divo.Descendants("a").LastOrDefault().ChildAttributes("href").FirstOrDefault().Value,
Title = divo.Descendants("a").FirstOrDefault().InnerText
};
Blog.Add(Blogo);
}
break;
}
}
TheResult = await SqlCheck(Blog[0].Title, Blog[0].Summary, Blog[0].Link); //error 500
return (ActionResult)new OkObjectResult
(
new
{
TheResult
}
);
}
public static async Task<string> SqlCheck(string Tit, string Sumy, string Lin)
{
SqlConnectionStringBuilder builder = new SqlConnectionStringBuilder();
builder.DataSource = "flygon.database.windows.net";
builder.UserID = "*****";
builder.Password = "********";
builder.InitialCatalog = "torkoal";
System.Data.DataSet ds = new System.Data.DataSet();
SqlConnection connection = new SqlConnection(builder.ConnectionString);
connection.Open();
SqlCommand CheckCommand = new SqlCommand("SELECT * FROM TableBoto WHERE Link = @id3 ", connection);
CheckCommand.Parameters.AddWithValue("@id3", Lin);
SqlDataAdapter dataAdapter = new SqlDataAdapter(CheckCommand);
dataAdapter.Fill(ds);
int i = ds.Tables[0].Rows.Count;
if (i > 0)
{
return $" We got a Duplicates in title : {Tit}";
}
try
{
{
string query = $"insert into TableBoto(Title,Summary,Link) values('{Tit}','{Sumy}','{Lin}');";
SqlCommand command = new SqlCommand(query, connection);
SqlDataReader reader = await command.ExecuteReaderAsync();
reader.Close();
}
}
catch (SqlException)
{
// Console.WriteLine(e.ToString());
}
connection.Close();
return $" Success Ign +{Tit} + Ign {Sumy}+ Ign {Lin} Ign Success SQL ";
}
}
A 500 HTTP status code is a generic code which means the server was not able to process the request due to some issue. The first step would be to add some exception handling to your function and see if and where the failure occurs.
On a side note, you should not use HttpClient the way it is used in this code: don't new it up every time your function executes; the client should be static. Refer to Manage connections in Azure Functions.
Hi, I am getting the error below while trying to access a website hosted in IIS 8. Its SSL certificate had expired, and I installed the new SSL certificate provided by GoDaddy. Everything worked fine for 2 days, and now it shows the error below. Let me know if anyone can figure out what the issue is.
using Microsoft.CognitiveServices.Speech;
using Newtonsoft.Json;
using System;
using System.Diagnostics;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
namespace SPT
{
class Program
{
public static async Task RecognizeSpeechAsync()
{
// Creates an instance of a speech config with specified subscription key and service region.
// Replace with your own subscription key // and service region (e.g., "westus").
var config = SpeechConfig.FromSubscription(" 7cf359266c964dc789960abe063cc65b", "westus");
// Creates a speech recognizer.
using (var recognizer = new SpeechRecognizer(config))
{
Console.WriteLine("Say something...");
// Starts speech recognition, and returns after a single utterance is recognized. The end of a
// single utterance is determined by listening for silence at the end or until a maximum of 15
// seconds of audio is processed. The task returns the recognition text as result.
// Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
// shot recognition like command or query.
// For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
var result = await recognizer.RecognizeOnceAsync();
// Checks result.
if (result.Reason == ResultReason.RecognizedSpeech)
{
Console.WriteLine($"We recognized: {result.Text}");
}
else if (result.Reason == ResultReason.NoMatch)
{
Console.WriteLine($"NOMATCH: Speech could not be recognized.");
}
else if (result.Reason == ResultReason.Canceled)
{
var cancellation = CancellationDetails.FromResult(result);
Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
if (cancellation.Reason == CancellationReason.Error)
{
Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
Console.WriteLine($"CANCELED: Did you update the subscription info?");
}
}
}
}
public static async Task SynthesisToSpeakerAsync()
{
// Creates an instance of a speech config with specified subscription key and service region.
// Replace with your own subscription key and service region (e.g., "westus").
// The default language is "en-us".
var config = SpeechConfig.FromSubscription("7cf359266c964dc789960abe063cc65b", "westus");
// Creates a speech synthesizer using speaker as audio output.
using (var synthesizer = new SpeechSynthesizer(config))
{
// Receive a text from console input and synthesize it to speaker.
Console.WriteLine("Type some text that you want to speak...");
Console.Write("> ");
string text = Console.ReadLine();
using (var result = await synthesizer.SpeakTextAsync(text))
{
if (result.Reason == ResultReason.SynthesizingAudioCompleted)
{
Console.WriteLine($"Speech synthesized to speaker for text [{text}]");
}
else if (result.Reason == ResultReason.Canceled)
{
var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
if (cancellation.Reason == CancellationReason.Error)
{
Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
Console.WriteLine($"CANCELED: ErrorDetails=[{cancellation.ErrorDetails}]");
Console.WriteLine($"CANCELED: Did you update the subscription info?");
}
}
}
// This is to give some time for the speaker to finish playing back the audio
Console.WriteLine("Press any key to exit...");
Console.ReadKey();
}
}
public static async Task SynthesisToVideoAsync()
{
var apiUrl = "https://api.videoindexer.ai";
var accountId = "56fbb8f8-b9a8-4119-b46a-fa5fb6668ddd";
var location = "westus2";
var apiKey = "6f354f730bc141f9bc3e57e73c6001b0";
System.Net.ServicePointManager.SecurityProtocol = System.Net.ServicePointManager.SecurityProtocol | System.Net.SecurityProtocolType.Tls12;
// create the http client
var handler = new HttpClientHandler();
handler.AllowAutoRedirect = false;
var client = new HttpClient(handler);
client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", apiKey);
// obtain account access token
var accountAccessTokenRequestResult = client.GetAsync($"{apiUrl}/auth/{location}/Accounts/{accountId}/AccessToken?allowEdit=true").Result;
var accountAccessToken = accountAccessTokenRequestResult.Content.ReadAsStringAsync().Result.Replace("\"", "");
client.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Key");
// upload a video
var content = new MultipartFormDataContent();
Debug.WriteLine("Uploading...");
// get the video from URL
var videoUrl = "VIDEO_URL"; // replace with the video URL
// as an alternative to specifying video URL, you can upload a file.
// remove the videoUrl parameter from the query string below and add the following lines:
//FileStream video = File.OpenRead(Globals.VIDEOFILE_PATH);
//byte[] buffer = new byte[video.Length];
//video.Read(buffer, 0, buffer.Length);
//content.Add(new ByteArrayContent(buffer));
var uploadRequestResult = client.PostAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos?accessToken={accountAccessToken}&name=some_name&description=some_description&privacy=private&partition=some_partition&videoUrl={videoUrl}", content).Result;
var uploadResult = uploadRequestResult.Content.ReadAsStringAsync().Result;
// get the video id from the upload result
var videoId = JsonConvert.DeserializeObject<dynamic>(uploadResult)["id"];
Debug.WriteLine("Uploaded");
Debug.WriteLine("Video ID: " + videoId);
// obtain video access token
client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", apiKey);
var videoTokenRequestResult = client.GetAsync($"{apiUrl}/auth/{location}/Accounts/{accountId}/Videos/{videoId}/AccessToken?allowEdit=true").Result;
var videoAccessToken = videoTokenRequestResult.Content.ReadAsStringAsync().Result.Replace("\"", "");
client.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Key");
// wait for the video index to finish
while (true)
{
Thread.Sleep(10000);
var videoGetIndexRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/{videoId}/Index?accessToken={videoAccessToken}&language=English").Result;
var videoGetIndexResult = videoGetIndexRequestResult.Content.ReadAsStringAsync().Result;
var processingState = JsonConvert.DeserializeObject<dynamic>(videoGetIndexResult)["state"];
Debug.WriteLine("");
Debug.WriteLine("State:");
Debug.WriteLine(processingState);
// job is finished
if (processingState != "Uploaded" && processingState != "Processing")
{
Debug.WriteLine("");
Debug.WriteLine("Full JSON:");
Debug.WriteLine(videoGetIndexResult);
break;
}
}
// search for the video
var searchRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/Search?accessToken={accountAccessToken}&id={videoId}").Result;
var searchResult = searchRequestResult.Content.ReadAsStringAsync().Result;
Debug.WriteLine("");
Debug.WriteLine("Search:");
Debug.WriteLine(searchResult);
// get insights widget url
var insightsWidgetRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/{videoId}/InsightsWidget?accessToken={videoAccessToken}&widgetType=Keywords&allowEdit=true").Result;
var insightsWidgetLink = insightsWidgetRequestResult.Headers.Location;
Debug.WriteLine("Insights Widget url:");
Debug.WriteLine(insightsWidgetLink);
// get player widget url
var playerWidgetRequestResult = client.GetAsync($"{apiUrl}/{location}/Accounts/{accountId}/Videos/{videoId}/PlayerWidget?accessToken={videoAccessToken}").Result;
var playerWidgetLink = playerWidgetRequestResult.Headers.Location;
Debug.WriteLine("");
Debug.WriteLine("Player Widget url:");
Debug.WriteLine(playerWidgetLink);
}
static void Main()
{
RecognizeSpeechAsync().Wait();
SynthesisToSpeakerAsync().Wait();
SynthesisToVideoAsync().Wait();
Console.WriteLine("Please press a key to continue.");
Console.ReadLine();
}
}
}
I am using the NYT developers' Movie Reviews API, and I am at the beginning where I just want to see a response. It appears that I get a NULL response, which ends up in the error branch that I will point out in the code (the line CharSequence text = "There was an error. Please try again"; ) to help you find it. Could someone please tell me what causes this problem?
NYT Documentation Link http://developer.nytimes.com/movie_reviews_v2.json#/Documentation/GET/critics/%7Bresource-type%7D.json
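For context, this is the request I am trying to issue, reduced to a minimal standalone sketch (YOUR_API_KEY is a placeholder, and the imports are the standard java.net/java.io ones). The base URL already ends in "?query=", so any additional parameter has to be joined with "&", and the reviews search endpoint is a plain GET with no request body.
// Minimal sketch: build the search URL with an URL-encoded query and "&api-key=...",
// then issue a plain GET and return the response body as a string.
private String fetchReviews(String searchTerm) throws IOException {
    String requestUrl = "https://api.nytimes.com/svc/movies/v2/reviews/search.json"
            + "?query=" + URLEncoder.encode(searchTerm, "UTF-8")
            + "&api-key=YOUR_API_KEY";
    HttpURLConnection conn = (HttpURLConnection) new URL(requestUrl).openConnection();
    conn.setRequestMethod("GET"); // no setDoOutput(true); that would turn the request into a POST
    BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
    StringBuilder sb = new StringBuilder();
    String line;
    while ((line = in.readLine()) != null) {
        sb.append(line);
    }
    in.close();
    conn.disconnect();
    return sb.toString(); // feed this to new JSONObject(...) as in onPostExecute
}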
public class MainActivity extends AppCompatActivity {
private final String site = "https://api.nytimes.com/svc/movies/v2/reviews/search.json?query=";
public int count;
public int i;
public int j;
public int k;
public int n;
public int comas;
public int ingadded;
public String web2 = "";
public String istos;
public ArrayList<String> mylist = new ArrayList<>();
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
final Button next = (Button) findViewById(R.id.button);
final EditText edit_text = (EditText) findViewById(R.id.ing);
final TextView show_ing = (TextView) findViewById(R.id.show_ing);
final Button done = (Button) findViewById(R.id.button3);
final Button refresh = (Button) findViewById(R.id.refresh);
final Button delete = (Button) findViewById(R.id.delete);
final ProgressDialog Dialog = new ProgressDialog(MainActivity.this);
//done move to next activity
done.setOnClickListener(new View.OnClickListener() {
@Override
//CHECK IF TEXT BOX IS EMPTY
public void onClick(View view) {
web2 = edit_text.getText().toString();
//check if there are ingredients added
if (web2.isEmpty()) {
Context context = getApplicationContext();
CharSequence text = "Search Bar is Empty!";
int duration = Toast.LENGTH_SHORT;
Toast toast = Toast.makeText(context, text, duration);
toast.show();
Dialog.dismiss();
}
else {
//IF NOT CREATE THE LINK AND SEND IT TO LongOperation
web2 = edit_text.getText().toString();
//create link - MAYBE THE WAY API KEY MUST BE CALLED?
istos = site + web2 + "?api-key=xxxxxxxxxxxx";
Log.v("Showme=", istos);
web2 = "";
// WebServer Request URL
String serverURL = istos;
// Use AsyncTask execute Method To Prevent ANR Problem
new LongOperation().execute(serverURL);
}
}
});
edit_text.setOnFocusChangeListener(new View.OnFocusChangeListener() {
public void onFocusChange(View v, boolean hasFocus) {
if (hasFocus)
edit_text.setHint("");
else
edit_text.setHint("Type the title of the movie");
}
});
}
private class LongOperation extends AsyncTask<String, String, Void> {
// Required initialization
private final HttpClient Client = new DefaultHttpClient();
private String Content;
private String Error = null;
private Integer count;
private int add = 1;
private ProgressDialog Dialog = new ProgressDialog(MainActivity.this);
String data = "";
TextView jsonParsedname = (TextView) findViewById(R.id.jsonParsedname1);
ArrayList<ArrayList<Integer>> numArray = new ArrayList<ArrayList<Integer>>();
int sizeData = 0;
protected void onPreExecute() {
//Start Progress Dialog (Message)
Dialog.setMessage("Finding Movies..");
Dialog.show();
try {
// Set Request parameter
data = "&" + URLEncoder.encode("data", "UTF-8");
} catch (UnsupportedEncodingException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
// Call after onPreExecute method
protected Void doInBackground(String... urls) {
/************ Make Post Call To Web Server ***********/
BufferedReader reader = null;
// Send data
try {
// Defined URL where to send data
URL url = new URL(urls[0]);
// Send POST data request
URLConnection conn = url.openConnection();
conn.setDoOutput(true);
OutputStreamWriter wr = new OutputStreamWriter(conn.getOutputStream());
wr.write(data);
wr.flush();
// Get the server response
reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
StringBuilder sb = new StringBuilder();
String line = "";
// Read Server Response
while ((line = reader.readLine()) != null) {
// Append server response in string
sb.append(line + "");
}
// Append Server Response To Content String
Content = sb.toString();
} catch (Exception ex) {
Error = ex.getMessage();
} finally {
try {
reader.close();
} catch (Exception ex) {
}
}
/*****************************************************/
return null;
}
protected void onPostExecute(Void unused) {
// NOTE: You can call UI Element here.
// Close progress dialog
Dialog.dismiss();
if (Error != null) {
Context context = getApplicationContext();
CharSequence text = "There was an error. Please try again";
int duration = Toast.LENGTH_SHORT;
Toast toast = Toast.makeText(context, text, duration);
toast.show();
Dialog.dismiss();
} else {
JSONObject jsonResponse;
try {
/****** Creates a new JSONObject with name/value mappings from the JSON string. ********/
jsonResponse = new JSONObject(Content);
if (jsonResponse == null) {
jsonParsedname.setText("Wrong Input");
}
/***** Returns the value mapped by name if it exists and is a JSONArray. ***/
/******* Returns null otherwise. *******/
JSONArray jsonMainNode = jsonResponse.optJSONArray("results");