I am currently trying to detect a user logout in macOS 10.14 (Mojave). I found this, which was working in the past:
Catching Logoff (not power off) event on MAC using objective C
The code I use is:
NSAppleEventManager* m = [NSAppleEventManager sharedAppleEventManager];
NSAppleEventDescriptor* desc = [m currentAppleEvent];
switch ([[desc attributeDescriptorForKeyword:kAEQuitReason] int32Value])
{
    case kAELogOut:
    case kAEReallyLogOut:
        // log out
        break;
    case kAEShowRestartDialog:
    case kAERestart:
        // system restart
        break;
    case kAEShowShutdownDialog:
    case kAEShutDown:
        // system shutdown
        break;
    default:
        // ordinary quit
        break;
}
But the value I get is always zero (0).
Did something change in Mojave or is there another mechanism? This code is called in the applicationShouldTerminate function in my AppDelegate.
It's still available and it works (I just tested it on macOS Big Sur), but the value is in the enumCodeValue property rather than in int32Value.
func applicationShouldTerminate(_ sender: NSApplication) -> NSApplication.TerminateReply {
    let reason = NSAppleEventManager.shared()
        .currentAppleEvent?
        .attributeDescriptor(forKeyword: kAEQuitReason)

    switch reason?.enumCodeValue {
    case kAELogOut, kAEReallyLogOut:
        print("Logout")
        return .terminateNow
    case kAERestart, kAEShowRestartDialog:
        print("Restart")
        return .terminateNow
    case kAEShutDown, kAEShowShutdownDialog:
        print("Shutdown")
        return .terminateNow
    case 0:
        // `enumCodeValue` docs:
        //
        //   The contents of the descriptor, as an enumeration type,
        //   or 0 if an error occurs.
        print("We don't know")
        return .terminateNow
    default:
        print("Cmd-Q, Quit menu item, ...")
        return .terminateNow
    }
}
I am using Canon's EDSDK version 13.16.0 on Windows 10 with a C++ compiler. My goal is to read an image from a camera directly into host memory (without storing the image on the camera or the host) and get access to its RAW RGB data. The data is then used to create FITS files for astrophotography.
I could successfully do the following:
Create two threads: one to launch the EDSDK with EdsInitializeSDK() and finally remove it with EdsTerminateSDK() (I call this the main thread), and a second one to execute all the other EDSDK commands (the CommandThread). Two threads are required to avoid deadlocking the EDSDK. By the way, contrary to the SDK documentation, the EDSDK only sends event messages to the thread that executes EdsInitializeSDK(), but NOT to the thread that calls EdsOpenSession() (in my case the CommandThread), so the message loop (loosely, the "message pump") must be installed there in order to catch the events fired by the EDSDK.
Shoot an image with properly set ISO and Tv.
Register a callback function with EdsSetObjectEventHandler().
Set up a static callback function CatchObjectEvent(EdsObjectEvent Event, EdsBaseRef Object, EdsVoid *Context) to catch the kEdsObjectEvent_DirItemRequestTransfer event.
Set the camera property kEdsPropID_SaveTo to kEdsSaveTo_Host.
Here is the callback function code:
static EdsError EDSCALLBACK CatchObjectEvent(EdsObjectEvent Event,EdsBaseRef Object,EdsVoid *Context)
{
EdsDirectoryItemInfo DirItemInfo;
EdsStreamRef MemStream,ImageStream;
EdsImageRef Image;
EdsImageInfo ImageInfo;
EdsTargetImageType ImageType;
TEDSDKCtx *Ctx;
EdsError Err;
EdsUInt32 Colors,Depth;
EdsUInt64 ImageBytes;
String S;
switch(Event) {
case kEdsObjectEvent_DirItemRequestTransfer: // Object is EdsDirectoryItemRef
// check first, if exposure was terminated by user
Ctx=(TEDSDKCtx *)Context;
if(Ctx->TermExposure) {
Err=Canon.EdsDownloadCancel(Object); // discard image
Err=EDS_ERR_IMAGE_INTERUPTED;
Ctx->Error=Err;
}
else {
Err=Canon.EdsGetDirectoryItemInfo(Object,&DirItemInfo);
if(Err==EDS_ERR_OK) {
MemStream=NULL;
Err=Canon.EdsCreateMemoryStream(DirItemInfo.size,&MemStream);
if(MemStream!=NULL) { // stream could be allocated
S="Downloading";
SYNC.Add_To_VCL_Queue(S,clCream,NOCHNG,Form1->lbCamState[Ctx->CamID]);
// download progress bar callback function
Err=Canon.EdsSetProgressCallback(MemStream,CatchProgressEvent,kEdsProgressOption_Periodically,Ctx);
Err=Canon.EdsDownload(Object,DirItemInfo.size,MemStream);// download stream
Err=Canon.EdsDownloadComplete(Object);
Image=NULL;
Err=Canon.EdsCreateImageRef(MemStream,&Image);
if(Image!=NULL) {
// image conversion successful
Err=Canon.EdsGetImageInfo(Image,kEdsImageSrc_FullView,&ImageInfo);
if(Err==EDS_ERR_OK) {
ASCUtil.Cam[Ctx->CamID].XSize=ImageInfo.width;
ASCUtil.Cam[Ctx->CamID].YSize=ImageInfo.height;
Colors=ImageInfo.numOfComponents;
Depth=ImageInfo.componentDepth/8;
if(ImageInfo.componentDepth==8) ImageType=kEdsTargetImageType_RGB;
else ImageType=kEdsTargetImageType_RGB16;
ImageBytes=Colors*Depth*ImageInfo.width*ImageInfo.height;
ImageStream=NULL;
Err=Canon.EdsCreateMemoryStream(ImageBytes,&ImageStream);
if(ImageStream!=NULL) {
// stream could be allocated
Err=Canon.EdsGetImage(Image,kEdsImageSrc_RAWFullView,ImageType,ImageInfo.effectiveRect,ImageInfo.effectiveRect.size,ImageStream);
if(Err==EDS_ERR_OK) { // convert image data
// here I would like to use the RGB data from the ImageStream
}
else Ctx->Error=Err;
Canon.EdsRelease(ImageStream);
}
else Ctx->Error=Err;
}
else Ctx->Error=Err;
Canon.EdsRelease(Image);
}
else Ctx->Error=Err;
Canon.EdsRelease(MemStream);
}
else Ctx->Error=Err;
}
else Ctx->Error=Err;
}
Ctx->ImageTaken=true;
break;
case kEdsObjectEvent_VolumeInfoChanged:
break;
case kEdsObjectEvent_VolumeUpdateItems:
break;
case kEdsObjectEvent_FolderUpdateItems:
break;
case kEdsObjectEvent_DirItemCreated:
break;
case kEdsObjectEvent_DirItemRemoved:
break;
case kEdsObjectEvent_DirItemInfoChanged:
break;
case kEdsObjectEvent_DirItemContentChanged:
break;
case kEdsObjectEvent_DirItemRequestTransferDT:
break;
case kEdsObjectEvent_DirItemCancelTransferDT:
break;
case kEdsObjectEvent_VolumeAdded:
break;
case kEdsObjectEvent_VolumeRemoved:
break;
default:
break;
}
if(Object) Err=Canon.EdsRelease(Object);
return(Err);
}
After the image exposure has finished, the kEdsObjectEvent_DirItemRequestTransfer event is caught nicely. The objects MemStream, Image and ImageInfo are properly set. ImageInfo:
Width: 4272
Height: 2848
Depth: 16
Colors: 3
I am using a Canon EOS 450D without a memory card.
I have realized from the size of MemStream (about 11 MB) that the downloaded data is compressed (which makes sense) and is converted to an uncompressed RAW image by the EdsImageRef object. But, one step before heaven, the function EdsGetImage(), which should deliver the desired RGB stream, stops me with the error EDS_ERR_FILE_OPEN_ERROR. How can that be? From the SDK documentation I assumed that this function also works on streams and does not require a physical file to work with. Or is the error message misleading here? Up to this point in the code, no file has been created, either on the camera or on the host, and the EDSDK did everything without complaining about a missing memory card, etc.
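For reference, plugging the reported ImageInfo values into the code's own formula (Colors × Depth × width × height) gives 3 × 2 × 4272 × 2848 ≈ 73 MB of uncompressed 16-bit RGB data, so the roughly 11 MB memory stream clearly holds compressed data.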
I have also tried reducing the output to 8-bit by requesting kEdsTargetImageType_RGB, but the problem persists. Maybe I am misunderstanding the SDK concept here.
Also, SDK functions reported elsewhere, such as EdsSaveImage() for storing a stream to disk, no longer seem to be available, at least in EDSDK version 13.16.1.
Please, can you help me?
Here are steps to reproduce:
Activate AVAudioSession with .playback category.
Register for AVAudioSession.interruptionNotification
Create two AVPlayers and start them
Interrupt playback by invoking Siri, or by receiving a call via Skype, a cellular call, etc.
Expected behavior:
Receiving notification of the audio session interruption with .began state at the start and .ended at the end. Also, as a side effect, Siri doesn't respond to commands.
Real behavior:
Only the .began notification is delivered.
To bring back the .ended notification (which is used to resume playback), remove one of the players.
Question: how do I handle the audio session interruption with more than one AVPlayer running?
Here I created a simple demo project: https://github.com/denis-obukhov/AVAudioSessionBug
Tested on iOS 14.4
import UIKit
import AVFoundation
class ViewController: UIViewController {
private let player1: AVPlayer? = {
$0.volume = 0.5
return $0
}(AVPlayer())
private let player2: AVPlayer? = {
$0.volume = 0.5
return $0 // return nil for any player to bring back .ended interruption notification
}(AVPlayer())
override func viewDidLoad() {
super.viewDidLoad()
registerObservers()
startAudioSession()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
player1?.replaceCurrentItem(with: makePlayerItem(named: "music1"))
player2?.replaceCurrentItem(with: makePlayerItem(named: "music2"))
[player1, player2].forEach { $0?.play() }
}
private func makePlayerItem(named name: String) -> AVPlayerItem {
let fileURL = Bundle.main.url(
forResource: name,
withExtension: "mp3"
)!
return AVPlayerItem(url: fileURL)
}
private func registerObservers() {
NotificationCenter.default.addObserver(
self, selector: #selector(handleInterruption(_:)),
name: AVAudioSession.interruptionNotification,
object: nil
)
}
private func startAudioSession() {
try? AVAudioSession.sharedInstance().setCategory(.playback)
try? AVAudioSession.sharedInstance().setActive(true)
}
@objc private func handleInterruption(_ notification: Notification) {
print("GOT INTERRUPTION")
guard
let userInfo = notification.userInfo,
let typeValue = userInfo[AVAudioSessionInterruptionTypeKey] as? UInt,
let type = AVAudioSession.InterruptionType(rawValue: typeValue)
else {
return
}
switch type {
case .began:
print("Interruption BEGAN")
[player1, player2].forEach { $0?.pause() }
case .ended:
// This part isn't called if more than 1 player is playing
print("Interruption ENDED")
[player1, player2].forEach { $0?.play() }
@unknown default:
print("Unknown value")
}
}
}
I just ran into the same issue, and it was driving me crazy for a few days. I'm using two AVQueuePlayers (AVQueuePlayer is a subclass of AVPlayer) to play two sets of audio sounds on top of each other, and I get the AVAudioSession.interruptionNotification with the .began type when there is an incoming call, but there is no .ended notification when the call ends.
That said, I've found that .ended is reliably sent if you instead use two instances of AVAudioPlayer. It also works with one instance of AVAudioPlayer mixed with one instance of AVQueuePlayer. But for some reason, using two instances of AVQueuePlayer (or AVPlayer) seems to break it.
Did you ever find a solution for this? For my purposes I need queuing of tracks, so I must use AVQueuePlayer; I'll probably file a bug report with Apple.
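To make that workaround concrete, here is a minimal sketch using two AVAudioPlayer instances. The class and property names are made up, the file names match the question's demo project, and the interruption handling mirrors the question's code:
import AVFoundation

final class DualAudioPlayer: NSObject {
    private let playerA = try? AVAudioPlayer(
        contentsOf: Bundle.main.url(forResource: "music1", withExtension: "mp3")!)
    private let playerB = try? AVAudioPlayer(
        contentsOf: Bundle.main.url(forResource: "music2", withExtension: "mp3")!)

    override init() {
        super.init()
        try? AVAudioSession.sharedInstance().setCategory(.playback)
        try? AVAudioSession.sharedInstance().setActive(true)
        NotificationCenter.default.addObserver(
            self, selector: #selector(handleInterruption(_:)),
            name: AVAudioSession.interruptionNotification,
            object: nil
        )
        [playerA, playerB].forEach { $0?.play() }
    }

    @objc private func handleInterruption(_ notification: Notification) {
        guard
            let typeValue = notification.userInfo?[AVAudioSessionInterruptionTypeKey] as? UInt,
            let type = AVAudioSession.InterruptionType(rawValue: typeValue)
        else { return }

        switch type {
        case .began:
            [playerA, playerB].forEach { $0?.pause() }
        case .ended:
            // Per the observation above, .ended arrives reliably when both players are AVAudioPlayer.
            [playerA, playerB].forEach { $0?.play() }
        @unknown default:
            break
        }
    }
}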
I am using NSArrayController, NSTableView and Core Data bindings.
I have added a button and connected its action to the add: method of the NSArrayController.
On adding a new record:
The table view shows the new record.
The NSArrayController's add: method is called.
Problem:
The value is not saved to the Core Data store (SQLite type).
On relaunching, the application shows the old data.
This is Apple's example code. Basically, it tries to save the context before the app is terminated. Depending on your specific case, you might move this functionality somewhere else.
func applicationShouldTerminate(_ sender: NSApplication) -> NSApplication.TerminateReply {
// Save changes in the application's managed object context before the application terminates.
let context = persistentContainer.viewContext
if !context.commitEditing() {
NSLog("\(NSStringFromClass(type(of: self))) unable to commit editing to terminate")
return .terminateCancel
}
if !context.hasChanges {
return .terminateNow
}
do {
try context.save()
} catch {
let nserror = error as NSError
// Customize this code block to include application-specific recovery steps.
let result = sender.presentError(nserror)
if (result) {
return .terminateCancel
}
let question = NSLocalizedString("Could not save changes while quitting. Quit anyway?", comment: "Quit without saves error question message")
let info = NSLocalizedString("Quitting now will lose any changes you have made since the last successful save", comment: "Quit without saves error question info");
let quitButton = NSLocalizedString("Quit anyway", comment: "Quit anyway button title")
let cancelButton = NSLocalizedString("Cancel", comment: "Cancel button title")
let alert = NSAlert()
alert.messageText = question
alert.informativeText = info
alert.addButton(withTitle: quitButton)
alert.addButton(withTitle: cancelButton)
let answer = alert.runModal()
if answer == .alertSecondButtonReturn {
return .terminateCancel
}
}
// If we got here, it is time to quit.
return .terminateNow
}
You're probably missing setting the managedObjectContext and entityName on NSArrayController.
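These are normally configured in Interface Builder (bind the array controller's Managed Object Context, set its Mode to Entity Name and fill in the entity). As a rough sketch of doing the same in code, assuming an arrayController outlet, the standard Xcode-template AppDelegate with an NSPersistentContainer, and a hypothetical entity named "Person":
import Cocoa

final class PeopleViewController: NSViewController {
    @IBOutlet var arrayController: NSArrayController!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Assumption: the app delegate exposes the Core Data stack as `persistentContainer`.
        let context = (NSApplication.shared.delegate as! AppDelegate).persistentContainer.viewContext
        arrayController.managedObjectContext = context
        arrayController.entityName = "Person" // hypothetical entity name
    }
}
Even with these set, nothing reaches the SQLite store until the context is saved, which is what the applicationShouldTerminate code above does.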
I have a status bar Cocoa app that provides a GUI with general-information and preferences windows for my background server. The server is implemented in the C programming language, and the only work it does right now is a simple echo back to the client.
The application has a button that starts this server. When I press this button, the server starts and listens on a randomly selected port. It works correctly; I can even restart the server, etc.
I start the server using this code:
- (void) startServerInBackground: (server_t) serverFunc {
dispatch_queue_t server_dispatch_queue = dispatch_queue_create("click.remotely.Server", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(server_dispatch_queue, ^(void){
start_server(serverFunc, serverInfo);
/***
* dispatch_async(dispatch_get_main_queue(), ^(void){
* //Run UI Updates
* });
*/
});
}
The problem begins only when a client connects to the server.
I can connect using telnet 192.168.8.101 <PORT_NUMBER>. I can even talk to the server, and it replies correctly. This is where the odd things happen!
When I then try to use the status bar app, I get crashes like these:
1. No crash for a very long time (client talking to the server, restarting, switching windows and panes).
2. A crash immediately after connecting a client and selecting the app's status bar icon.
3. A crash some time later, when I connect a client, open the status bar icon, select a menu item and try to open a window.
4. The app can also crash a little later, e.g. not on the first window opening but on the third, fourth or n-th window opening or button click.
The screenshot below shows how nondeterministic this behavior is.
What causes the app to crash? How can I resolve this issue?
Here is my server loop in C
/**
* Function is looping infinitely and waiting
* for new incoming client connections.
* It handles connections one by one on the same thread.
*/
result_t iterative_stream_server_loop(server_info_t *server_info, connection_handler_t handle_connection) {
sock_fd_t cs_fd, ps_fd;
// get passive server socket
ps_fd = server_info_sock(server_info);
while(1) {
if(server_info_should_shut_down(server_info)) {
return CLOSED;
}
if(server_info_force_shut_down(server_info)) {
return FORCE_CLOSED;
}
// check to accept new connection on the main thread...
cs_fd = accept_new_connection(ps_fd);
if(cs_fd == FAILURE) {
fprintf(stderr, "accept_new_connection: failed!\n");
server_info_connection_error_event(server_info, cs_fd, CONN_ERROR_ACCEPT, "accept_new_connection: failed!");
return FAILURE;
} else if(cs_fd == CONTINUE) {
continue;
}
// publish client connected event
server_info_client_connected_event(server_info, cs_fd);
printf("Handle connection on the main thread...\n");
switch (handle_connection(server_info, cs_fd)) {
case FAILURE:
fprintf(stderr, "handle_connection: failed!\n");
// publish connection error event
server_info_connection_error_event(server_info, cs_fd, CONN_ERROR_HANDLER, "handle_connection: failed!");
break;
case CLOSED:
printf("handle_connection: closed!\n");
// publish client disconnecting event
server_info_client_disconnecting_event(server_info, cs_fd);
break;
default:
break;
}
if(close(cs_fd) < 0){
fprintf(stderr, "close: %s\n", strerror(errno));
server_info_connection_error_event(server_info, cs_fd, CONN_ERROR_CLOSE, strerror(errno));
return FAILURE;
}
}
}
And here is the client connection handling (echo service)
result_t echo_service_connection_handler(server_info_t *server_info, sock_fd_t sock_fd) {
char buf[MAX_BUF_SIZE];
int n_recv; // number of bytes received
int n_sent; // number of bytes sent
fcntl(sock_fd, F_SETFL, O_NONBLOCK);
while(1) {
if(server_info_should_shut_down(server_info))
return CLOSED;
if ((n_recv = recv(sock_fd, buf, sizeof(buf) - 1, 0)) <= 0) {
if(n_recv == 0) {
printf("echo connection is closing...\n");
return CLOSED;
}
if( (errno == EAGAIN) || (errno == EWOULDBLOCK)) {
// call to recv() on non-blocking socket result with nothing to receive
continue;
}
perror("recv");
// publish connection error event
server_info_connection_error_event(server_info, sock_fd, CONN_ERROR_RECV, strerror(errno));
return FAILURE;
}
buf[n_recv] = '\0';
printf(ANSI_COLOR_BLUE "server: received '%s'" ANSI_COLOR_RESET "\n", buf);
if ((n_sent = send(sock_fd, buf, strlen(buf), 0)) < 0) {
perror("send");
// publish connection error event
server_info_connection_error_event(server_info, sock_fd, CONN_ERROR_SEND, strerror(errno));
return FAILURE;
}
}
return SUCCESS;
}
I am using the following code to check the MPMediaLibrary authorizations:
func handlePermissions() {
let permissionStatus = MPMediaLibrary.authorizationStatus()
switch (permissionStatus) {
case MPMediaLibraryAuthorizationStatus.authorized:
print("permission status is authorized")
case MPMediaLibraryAuthorizationStatus.notDetermined:
print("permission status is not determined")
MPMediaLibrary.requestAuthorization(MPMediaLibraryAuthorizationStatus -> permissionStatus)
case MPMediaLibraryAuthorizationStatus.denied:
print("permission status is denied")
case MPMediaLibraryAuthorizationStatus.restricted:
print("permission status is restricted")
}
}
Ultimately, I am trying to prompt the user for their authorization (upon launch) prior to calling a query...via the case MPMediaLibraryAuthorizationStatus.notDetermined:. The code above produces the error: Expected type after '->'. When the requestAuthorization() line is commented out, the app crashes upon launch (access has not been authorized) and the authorization prompt view is shown after the launch screen disappears.
I've seen some examples of how to call requestAuthorization() in Objective-C but nothing in Swift. I don't understand:
MPMediaLibrary.requestAuthorization( handler: (MPMediaLibraryAuthorizationStatus) -> Void )
What is the proper way to request authorization for access to the MPMediaLibrary in Swift 3?
You've actually used the prototype of the requestAuthorization method. You need to adapt it to your own use.
MPMediaLibrary.requestAuthorization( handler: (MPMediaLibraryAuthorizationStatus) -> Void )
means that requestAuthorization takes a function as a parameter, and this function takes an MPMediaLibraryAuthorizationStatus as a parameter and returns nothing.
For example, if I want to request authorization and then print the result to the console, I first check whether the application is already authorized:
let authorizationStatus = MPMediaLibrary.authorizationStatus()
if authorizationStatus != .authorized {
MPMediaLibrary.requestAuthorization({
(status) in
switch status {
case .notDetermined:
print("notDetermined")
case .denied:
print("denied")
case .restricted:
print("restricted")
case .authorized:
print("authorized")
}
})
}
As you can see, I passed a function as the parameter of the requestAuthorization method. The function is written inside { ... }. It's called a closure, and it's something you use all the time in Swift.
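As a side note, because the handler is the only parameter, the same call can also be written with trailing closure syntax:
MPMediaLibrary.requestAuthorization { status in
    print("New authorization status: \(status.rawValue)")
}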
For Swift 4.2, to check authorization for MPMediaLibrary:
import MediaPlayer
let status = MPMediaLibrary.authorizationStatus()
switch status {
case .authorized:
self.openMusicLibrary()
break
case .notDetermined:
MPMediaLibrary.requestAuthorization() { status in
if status == .authorized {
DispatchQueue.main.async {
self.openMusicLibrary()
}
}
}
break
case .denied:
//show alert
print("Please Allow Access to the Media & Apple Music from appliction setting.")
break
case .restricted:
break
}
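Whichever version you use, it can help to wrap the check in a small helper so the query code runs only once access has been granted. A minimal sketch (the helper name and openMusicLibrary() are just examples):
import Foundation
import MediaPlayer

/// Runs `work` once media library access is available, requesting it first if needed.
func withMediaLibraryAccess(_ work: @escaping () -> Void) {
    switch MPMediaLibrary.authorizationStatus() {
    case .authorized:
        work()
    case .notDetermined:
        MPMediaLibrary.requestAuthorization { status in
            guard status == .authorized else { return }
            DispatchQueue.main.async { work() }
        }
    default:
        // .denied or .restricted: nothing to do except point the user at Settings.
        print("Please allow access to Media & Apple Music in the Settings app.")
    }
}

// Usage:
// withMediaLibraryAccess { self.openMusicLibrary() }
Also remember that the app's Info.plist needs an NSAppleMusicUsageDescription entry (the text shown in the permission prompt); without it, requesting media library access on iOS 10 and later terminates the app.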