I can retrieve an OS X disk partition UUID with this code:
void PrintUUID()
{
    DADiskRef disk;
    CFDictionaryRef descDict;
    DASessionRef session = DASessionCreate(NULL);
    if (session) {
        disk = DADiskCreateFromBSDName(NULL, session, "/dev/disk0s2");
        if (disk) {
            descDict = DADiskCopyDescription(disk);
            if (descDict) {
                CFTypeRef value = (CFTypeRef)CFDictionaryGetValue(descDict,
                                                                  CFSTR("DAVolumeUUID"));
                CFStringRef strValue = CFStringCreateWithFormat(NULL, NULL,
                                                                CFSTR("%@"), value);
                CFShow(strValue); // <------------- here is the output
                CFRelease(strValue);
                CFRelease(descDict);
            }
            CFRelease(disk);
        }
        CFRelease(session);
    }
}
The above code retrieves the UUID of disk0s2. I want to retrieve the UUID of the root disk (mount point = /),
but if I use "/" instead of "/dev/disk0s2", then DADiskCopyDescription returns NULL.
I also know I can do it in Terminal with this command:
diskutil info /
Briefly: how can I retrieve the BSD name of the root disk (to use it in DADiskCreateFromBSDName)?
Does anybody have an idea?
Thanks.
Use DADiskCreateFromVolumePath instead of DADiskCreateFromBSDName:
char *mountPoint = "/";
CFURLRef url = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)mountPoint, strlen(mountPoint), TRUE);
disk = DADiskCreateFromVolumePath(NULL, session, url);
CFRelease(url);
Swift code:
let mountPoint = "/"
let url = URL(fileURLWithPath: mountPoint)
if let session = DASessionCreate(nil),
   let disk = DADiskCreateFromVolumePath(nil, session, url as CFURL),
   let desc = DADiskCopyDescription(disk) as? [String: CFTypeRef] {
    if let uuid = desc["DAVolumeUUID"], CFGetTypeID(uuid) == CFUUIDGetTypeID() {
        if let uuidString = CFUUIDCreateString(nil, (uuid as! CFUUID)) {
            print(uuidString)
        }
    }
}
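Since the question also asks for the BSD name of the root disk itself: once you have the DADisk, DADiskGetBSDName returns it directly. A small sketch, reusing the disk from the snippet above:
if let bsdName = DADiskGetBSDName(disk) {
    print(String(cString: bsdName)) // e.g. "disk1s1"
}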
DADiskCreateFromVolumePath is only available on OS X 10.7 and above, so if you need to support older platforms such as OS X 10.4 (like me!), the only option is to use statfs to obtain the BSD name from the POSIX mount point. The entire function then becomes:
#include <sys/param.h>
#include <sys/mount.h>
#include <DiskArbitration/DiskArbitration.h>
void PrintUUID()
{
    DADiskRef disk;
    CFDictionaryRef descDict;
    DASessionRef session = DASessionCreate(kCFAllocatorDefault);
    if (session) {
        struct statfs statFS;
        statfs("/", &statFS);
        // f_mntfromname is e.g. "/dev/disk0s2", which DADiskCreateFromBSDName accepts
        disk = DADiskCreateFromBSDName(kCFAllocatorDefault, session, statFS.f_mntfromname);
        if (disk) {
            descDict = DADiskCopyDescription(disk);
            if (descDict) {
                CFTypeRef value = (CFTypeRef)CFDictionaryGetValue(descDict, CFSTR("DAVolumeUUID"));
                CFStringRef strValue = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), value);
                CFShow(strValue); // <------------- here is the output
                CFRelease(strValue);
                CFRelease(descDict);
            }
            CFRelease(disk);
        }
        CFRelease(session);
    }
}
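Incidentally, the same statfs approach works from Swift as well; a minimal sketch (the function name is mine, and the tuple-to-String conversion is the only awkward part):
import Darwin

func rootBSDName() -> String? {
    var fs = statfs()
    guard statfs("/", &fs) == 0 else { return nil }
    // f_mntfromname is a fixed-size CChar tuple holding e.g. "/dev/disk1s1"
    return withUnsafeBytes(of: fs.f_mntfromname) { raw in
        String(cString: raw.baseAddress!.assumingMemoryBound(to: CChar.self))
    }
}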
I'm trying to convert the Objective-C code mentioned here - https://gist.github.com/Coeur/1409855/f6df10c79f8cdd0fcb2a0735b99f4b3a74b9f954 -
to Swift.
The code I've written so far in Swift:
class func getMacAddress(_ ifName: String?) -> String? {
var mgmtInfoBase = [Int32](repeating: 0, count: 6)
var msgBuffer: Int8? = nil
var length: size_t
var macAddress = [UInt8](repeating: 0, count: 6)
var interfaceMsgStruct: if_msghdr?
var socketStruct: sockaddr_dl?
var errorFlag: String? = nil
// Setup the management Information Base (mib)
mgmtInfoBase[0] = Int32(Int(CTL_NET)) // Request network subsystem
mgmtInfoBase[1] = Int32(Int(AF_ROUTE)) // Routing table info
mgmtInfoBase[2] = 0
mgmtInfoBase[3] = Int32(Int(AF_LINK)) // Request link layer information
mgmtInfoBase[4] = Int32(Int(NET_RT_IFLIST)) // Request all configured interfaces
mgmtInfoBase[5] = Int32(if_nametoindex(ifName?.utf8CString)) //ERROR: Type of expression is ambiguous without more context
// With all configured interfaces requested, get handle index
if ( mgmtInfoBase[5] == 0) {
errorFlag = "if_nametoindex failure"
} else {
// Get the size of the data available (store in len)
if sysctl(&mgmtInfoBase, 6, nil, &length, nil, 0) < 0 {
errorFlag = "sysctl mgmtInfoBase failure"
} else {
// Alloc memory based on above call
if (msgBuffer = Int8((length))) == nil {
errorFlag = "buffer allocation failure"
} else {
// Get system information, store in buffer
if sysctl(&mgmtInfoBase, 6, &msgBuffer, &length, nil, 0) < 0 {
errorFlag = "sysctl msgBuffer failure"
}
}
}
}
// Before going any further...
if errorFlag != nil {
// Release the buffer memory
if (msgBuffer != nil) {
free(&msgBuffer)
}
return nil
}
// Map msgbuffer to interface message structure
interfaceMsgStruct = msgBuffer as? if_msghdr
// Map to link-level socket structure
socketStruct = (interfaceMsgStruct + 1) as? sockaddr_dl // ERROR: Cannot convert value of type 'if_msghdr?' to expected argument type 'Int'
// Copy link layer address data in socket structure to an array
if socketStruct == nil {
return nil
}
memcpy(&macAddress, socketStruct.sdl_data + socketStruct.sdl_nlen, 6) // ERROR: Type of expression is ambiguous without more context
// Read from char array into a string object, into traditional Mac address format
let macAddressString = String(format: "%02X:%02X:%02X:%02X:%02X:%02X", macAddress[0], macAddress[1], macAddress[2], macAddress[3], macAddress[4], macAddress[5])
// Release the buffer memory
free(&msgBuffer)
return macAddressString
}
I'm getting the errors mentioned in the comments. I've searched and tried every possible thing and read articles from the documentation, but I still can't get past these errors. Please help.
The definitions of the functions for which I'm getting errors, as declared in Darwin.posix.net.if:
public func if_nametoindex(_: UnsafePointer<Int8>!) -> UInt32
public struct if_msghdr {
public var ifm_msglen: UInt16 /* to skip non-understood messages */
public var ifm_version: UInt8 /* future binary compatability */
public var ifm_type: UInt8 /* message type */
public var ifm_addrs: Int32 /* like rtm_addrs */
public var ifm_flags: Int32 /* value of if_flags */
public var ifm_index: UInt16 /* index for associated ifp */
public var ifm_data: if_data /* statistics and other data about if */
public init()
public init(ifm_msglen: UInt16, ifm_version: UInt8, ifm_type: UInt8, ifm_addrs: Int32, ifm_flags: Int32, ifm_index: UInt16, ifm_data: if_data)
}
public func memcpy(_ __dst: UnsafeMutableRawPointer!, _ __src: UnsafeRawPointer!, _ __n: Int) -> UnsafeMutableRawPointer!
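For reference, here is a sketch of how the same routing-socket walk can look in current Swift. This is my own translation, not the gist's: it sidesteps the pointer-cast errors above by reading the sockaddr_dl fields at their documented byte offsets instead of mapping Swift structs onto the reply buffer.
import Darwin
import Foundation // for String(format:)

func macAddress(for interfaceName: String) -> String? {
    // MIB: network subsystem, routing table, link-layer info for one interface
    var mib: [Int32] = [CTL_NET, AF_ROUTE, 0, AF_LINK, NET_RT_IFLIST,
                        Int32(if_nametoindex(interfaceName))]
    guard mib[5] != 0 else { return nil } // if_nametoindex failure

    // First call: ask only for the size of the reply
    var length: size_t = 0
    guard sysctl(&mib, 6, nil, &length, nil, 0) == 0 else { return nil }

    // Second call: fetch the reply into a plain byte buffer
    var buffer = [UInt8](repeating: 0, count: length)
    guard sysctl(&mib, 6, &buffer, &length, nil, 0) == 0 else { return nil }

    // The reply is an if_msghdr followed by a packed sockaddr_dl:
    // sdl_nlen sits at byte 5 of sockaddr_dl, sdl_data starts at byte 8,
    // and the MAC address follows sdl_nlen bytes of interface name.
    let sdlStart = MemoryLayout<if_msghdr>.size
    guard buffer.count > sdlStart + 8 else { return nil }
    let macStart = sdlStart + 8 + Int(buffer[sdlStart + 5])
    guard buffer.count >= macStart + 6 else { return nil }

    return buffer[macStart ..< macStart + 6]
        .map { String(format: "%02X", $0) }
        .joined(separator: ":")
}
Calling macAddress(for: "en0") should produce something like "A1:B2:C3:D4:E5:F6".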
I want to get a snapshot of the process info in the OS X system.
NSProcessInfo can only get info about the calling process.
The ps command can be one solution, but I'd like a C or Objective-C program.
Here's an example using libproc.h to iterate over all the processes on the system and determine how many of them belong to the effective user of the process. You can easily modify this for your needs.
#include <libproc.h>
#include <sys/sysctl.h>

- (NSUInteger)maxSystemProcs
{
    int32_t maxproc;
    size_t len = sizeof(maxproc);
    sysctlbyname("kern.maxproc", &maxproc, &len, NULL, 0);
    return (NSUInteger)maxproc;
}

- (NSUInteger)runningUserProcs
{
    NSUInteger maxSystemProcs = self.maxSystemProcs;
    pid_t * const pids = calloc(maxSystemProcs, sizeof(pid_t));
    NSAssert(pids, @"Memory allocation failure.");
    const int pidcount = proc_listallpids(pids, (int)(maxSystemProcs * sizeof(pid_t)));
    NSUInteger userPids = 0;
    uid_t uid = geteuid();
    for (int *pidp = pids; *pidp; pidp++) {
        struct proc_bsdshortinfo bsdshortinfo;
        int writtenSize;
        writtenSize = proc_pidinfo(*pidp, PROC_PIDT_SHORTBSDINFO, 0, &bsdshortinfo, sizeof(bsdshortinfo));
        if (writtenSize != (int)sizeof(bsdshortinfo)) {
            continue;
        }
        if (bsdshortinfo.pbsi_uid == uid) {
            userPids++;
        }
    }
    free(pids);
    return (NSUInteger)userPids;
}
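As an aside, the kern.maxproc half of this bridges straight into Swift, since sysctlbyname comes in through Darwin (the libproc calls, by contrast, usually need a bridging header); a minimal sketch:
import Darwin

func maxSystemProcs() -> Int {
    var maxproc: Int32 = 0
    var len = MemoryLayout<Int32>.size
    guard sysctlbyname("kern.maxproc", &maxproc, &len, nil, 0) == 0 else { return 0 }
    return Int(maxproc)
}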
I've followed iFrameExtractor to successfully stream RTSP in my Swift project. The project also has a recording function: it basically uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an mp4 file.
When I use this project on my device, the RTSP streaming works fine, but the recording function always generates a blank mp4 file with no image and no sound.
Could anyone tell me what step I'm missing?
I'm using an iPhone 5 with iOS 9.1 and Xcode 7.1.1.
The ffmpeg is version 2.8.3, compiled following the instructions in CompilationGuide – FFmpeg.
Following is the sample code in this project.
The function that generates every frame:
-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;
while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// 20130525 albert.liao modified start
// Initialize a new format context for writing file
if(veVideoRecordState!=eH264RecIdle)
{
switch(veVideoRecordState)
{
case eH264RecInit:
{
if ( !pFormatCtx_Record )
{
int bFlag = 0;
//NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";
const char *file = [videoPath UTF8String];
pFormatCtx_Record = avformat_alloc_context();
bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );
if(bFlag==true)
{
veVideoRecordState = eH264RecActive;
fprintf(stderr, "h264_file_create success\n");
}
else
{
veVideoRecordState = eH264RecIdle;
fprintf(stderr, "h264_file_create error\n");
}
}
}
//break;
case eH264RecActive:
{
if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
{
bFirstIFrame=true;
vPTS = packet.pts ;
vDTS = packet.dts ;
#if 0
NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
[pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
[NSTimer scheduledTimerWithTimeInterval:5.0//2.0
target:self
selector:@selector(StopRecording:)
userInfo:nil
repeats:NO];
#endif
}
// Record audio when 1st i-Frame is obtained
if(bFirstIFrame==true)
{
if ( pFormatCtx_Record )
{
#if PTS_DTS_IS_CORRECT==1
packet.pts = packet.pts - vPTS;
packet.dts = packet.dts - vDTS;
#endif
h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
break;
case eH264RecClose:
{
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
#if 0
// 20130607 Test
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
{
ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
{
[library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
if (error) {
// TODO: error handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum error");
} else {
// TODO: success handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum success");
}
}];
}
[library release];
});
#endif
vPTS = 0;
vDTS = 0;
vAudioPTS = 0;
vAudioDTS = 0;
pFormatCtx_Record = NULL;
NSLog(#"h264_file_close() is finished");
}
else
{
NSLog(#"fc no exist");
}
bFirstIFrame = false;
veVideoRecordState = eH264RecIdle;
}
break;
default:
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
pFormatCtx_Record = NULL;
}
NSLog(#"[ERROR] unexpected veVideoRecordState!!");
veVideoRecordState = eH264RecIdle;
break;
}
}
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
else if(packet.stream_index==audioStream)
{
// 20131024 albert.liao modfied start
static int vPktCount=0;
BOOL bIsAACADTS = FALSE;
int ret = 0;
if(aPlayer.vAACType == eAAC_UNDEFINED)
{
tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
}
@synchronized(aPlayer)
{
if(aPlayer==nil)
{
aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
NSLog(#"aPlayer initAudio");
if(bIsAACADTS)
{
aPlayer.vAACType = eAAC_ADTS;
//NSLog(#"is ADTS AAC");
}
}
else
{
if(vPktCount<5) // The voice is listened once image is rendered
{
vPktCount++;
}
else
{
if([aPlayer getStatus]!=eAudioRunning)
{
dispatch_async(dispatch_get_main_queue(), ^(void) {
@synchronized(aPlayer)
{
NSLog(#"aPlayer start play");
[aPlayer Play];
}
});
}
}
}
};
#synchronized(aPlayer)
{
int ret = 0;
ret = [aPlayer putAVPacket:&packet];
if(ret <= 0)
NSLog(#"Put Audio Packet Error!!");
}
// 20131024 albert.liao modfied end
if(bFirstIFrame==true)
{
switch(veVideoRecordState)
{
case eH264RecActive:
{
if ( pFormatCtx_Record )
{
h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
}
}
else
{
//fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",
//        packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
}
// 20130525 albert.liao modified end
}
return frameFinished!=0;
}
avformat_write_header:
int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx,AVCodecContext *pAudioCodecCtx, double fps, void *p, int len )
{
int vRet=0;
AVOutputFormat *of=NULL;
AVStream *pst=NULL;
AVCodecContext *pcc=NULL, *pAudioOutputCodecContext=NULL;
avcodec_register_all();
av_register_all();
av_log_set_level(AV_LOG_VERBOSE);
if(!pFilePath)
{
fprintf(stderr, "FilePath no exist");
return -1;
}
if(!fc)
{
fprintf(stderr, "AVFormatContext no exist");
return -1;
}
fprintf(stderr, "file=%s\n",pFilePath);
// Create container
of = av_guess_format( 0, pFilePath, 0 );
fc->oformat = of;
strcpy( fc->filename, pFilePath );
// Add video stream
pst = avformat_new_stream( fc, 0 );
vVideoStreamIdx = pst->index;
fprintf(stderr,"Video Stream:%d",vVideoStreamIdx);
pcc = pst->codec;
avcodec_get_context_defaults3( pcc, AVMEDIA_TYPE_VIDEO );
// Save the stream as origin setting without convert
pcc->codec_type = pCodecCtx->codec_type;
pcc->codec_id = pCodecCtx->codec_id;
pcc->bit_rate = pCodecCtx->bit_rate;
pcc->width = pCodecCtx->width;
pcc->height = pCodecCtx->height;
if(fps==0)
{
double fps=0.0;
AVRational pTimeBase;
pTimeBase.num = pCodecCtx->time_base.num;
pTimeBase.den = pCodecCtx->time_base.den;
fps = 1.0/ av_q2d(pCodecCtx->time_base)/ FFMAX(pCodecCtx->ticks_per_frame, 1);
fprintf(stderr,"fps_method(tbc): 1/av_q2d()=%g",fps);
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
else
{
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
// reference ffmpeg\libavformat\utils.c
// For SPS and PPS in avcC container
pcc->extradata = malloc(sizeof(uint8_t)*pCodecCtx->extradata_size);
memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
pcc->extradata_size = pCodecCtx->extradata_size;
// For Audio stream
if(pAudioCodecCtx)
{
AVCodec *pAudioCodec=NULL;
AVStream *pst2=NULL;
pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
// Add audio stream
pst2 = avformat_new_stream( fc, pAudioCodec );
vAudioStreamIdx = pst2->index;
pAudioOutputCodecContext = pst2->codec;
avcodec_get_context_defaults3( pAudioOutputCodecContext, pAudioCodec );
fprintf(stderr,"Audio Stream:%d",vAudioStreamIdx);
fprintf(stderr,"pAudioCodecCtx->bits_per_coded_sample=%d",pAudioCodecCtx->bits_per_coded_sample);
pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;
// Copy the codec attributes
pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
pAudioOutputCodecContext->bit_rate = 12000;//pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
//FF_PROFILE_AAC_LOW;
// pAudioCodecCtx->bit_rate;
// AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;//pAudioCodecCtx->sample_fmt;
pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;
pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;
pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
pAudioOutputCodecContext->frame_size = 1024;
fprintf(stderr,"profile:%d, sample_rate:%d, channles:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
fprintf(stderr, "\ncould not open codec\n");
}
av_dict_free(&opts);
#if 0
// For Audio, this part is no need
if(pAudioCodecCtx->extradata_size!=0)
{
NSLog(#"extradata_size !=0");
pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t)*pAudioCodecCtx->extradata_size);
memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
}
else
{
// For WMA test only
pAudioOutputCodecContext->extradata_size = 0;
NSLog(#"extradata_size ==0");
}
#endif
}
if(fc->oformat->flags & AVFMT_GLOBALHEADER)
{
pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if ( !( fc->oformat->flags & AVFMT_NOFILE ) )
{
vRet = avio_open( &fc->pb, fc->filename, AVIO_FLAG_WRITE );
if(vRet!=0)
{
fprintf(stderr,"avio_open(%s) error", fc->filename);
}
}
// dump format in console
av_dump_format(fc, 0, pFilePath, 1);
vRet = avformat_write_header( fc, NULL );
if(vRet==0)
return 1;
else
return 0;
}
av_interleaved_write_frame:
void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void* p, int len, int64_t dts, int64_t pts )
{
AVStream *pst = NULL;
AVPacket pkt;
if ( 0 > vVideoStreamIdx )
return;
// may be audio or video
pst = fc->streams[ vStreamIdx ];
// Init packet
av_init_packet( &pkt );
if(vStreamIdx ==vVideoStreamIdx)
{
pkt.flags |= ( 0 >= getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
//pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pst->index;
pkt.data = (uint8_t*)p;
pkt.size = len;
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
// TODO: mark or unmark the log
//fprintf(stderr, "dts=%lld, pts=%lld\n",dts,pts);
// av_write_frame( fc, &pkt );
}
av_interleaved_write_frame( fc, &pkt );
}
av_write_trailer:
void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
return;
av_write_trailer( fc );
if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
avio_close( fc->pb );
av_free( fc );
}
Thanks.
It looks like you're using the same AVFormatContext for both the input and output?
In the line
pst = fc->streams[ vStreamIdx ];
You're taking the AVStream* from the AVFormatContext associated with your input (the RTSP stream), but later on you write the packet back to the same context with av_interleaved_write_frame( fc, &pkt );. I find it helps to think of a context as a file, which has helped me navigate this type of thing. I do something identical to what you're doing (though not on iOS), and I use a separate AVFormatContext for each of the input (RTSP stream) and the output (mp4 file). If I'm right, I think all you need to do is initialize a second AVFormatContext for the output and write your packets to that.
The following code (without error checking) is what I do to take an AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context that I had associated with the RTSP stream, and write from one to the other. This runs after I have fetched a packet, etc., which in your case you're already doing (I just take the packet from av_read_frame and re-package it).
This is code that could live in your write-frame function (it also includes the writing of the header).
AVFormatContext * output_format_context;
AVStream * in_stream_2;
AVStream * out_stream_2;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
AVOutputFormat * output_format = output_format_context->oformat;
// Loop through all streams
for (unsigned int i = 0; i < input_format_context->nb_streams; i++) {
// Create a pointer to the input stream that was allocated earlier in the code
AVStream *in_stream = input_format_context->streams[i];
// Create a pointer to a new stream that will be part of the output
AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
// Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
out_stream->time_base = in_stream->time_base;
// This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
// I don't remember what this is for :)
out_stream->codec->codec_tag = 0;
// This just sets a flag from the format context to the stream relating to the header
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check the NOFILE flag and open the output file context (previously the output file was only associated with the format context; now it is actually opened).
if (!(output_format->flags & AVFMT_NOFILE))
avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context,NULL);
// Re-getting the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescaling pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts, so this just protects that first one for my purposes. You probably don't need it.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write header, close/cleanup... etc
// ....
This code is fairly bare bones and doesn't include the setup (which it sounds like you're doing correctly anyway). I would also imagine this code could be cleaned up and tweaked for your purposes, but this works for me to re-write the RTSP stream into a file (in my case many files but code not shown).
The code is C/C++ (note the .c_str() calls), so you might need to make minor tweaks for Swift compatibility (for some of the library function calls, maybe). I think overall it should be compatible though.
Hopefully this helps point you in the right direction. It was cobbled together thanks to several sample code sources (I don't remember where), along with warning prompts from the libraries themselves.
I was trying to access the temp directory in Swift. In Objective-C, I could use the following code to do so:
- (NSString *)tempDirectory {
    NSString *tempDirectoryTemplate =
        [NSTemporaryDirectory() stringByAppendingPathComponent:@"XXXXX"];
    const char *tempDirectoryTemplateCString = [tempDirectoryTemplate fileSystemRepresentation];
    char *tempDirectoryNameCString = (char *)malloc(strlen(tempDirectoryTemplateCString) + 1);
    strcpy(tempDirectoryNameCString, tempDirectoryTemplateCString);
    char *result = mkdtemp(tempDirectoryNameCString);
    if (!result) {
        free(tempDirectoryNameCString); // avoid leaking the buffer on failure
        return nil;
    }
    NSString *tempDirectoryPath = [[NSFileManager defaultManager] stringWithFileSystemRepresentation:tempDirectoryNameCString length:strlen(result)];
    free(tempDirectoryNameCString);
    return tempDirectoryPath;
}
However, I'm a bit confused about the type conversions and casts between Objective-C and Swift, such as const char * or CMutablePointer<CChar>. Are there any documents I should look into?
Thanks.
How about something like:
public extension FileManager {
    func createTempDirectory() throws -> String {
        let tempDirectory = (NSTemporaryDirectory() as NSString).appendingPathComponent(UUID().uuidString)
        try FileManager.default.createDirectory(atPath: tempDirectory,
                                                withIntermediateDirectories: true,
                                                attributes: nil)
        return tempDirectory
    }
}
It doesn't answer your question about char* but it's cleaner...
NSFileManager reference here.
Also check out this SO question regarding unique names.
According to Apple, use of NSTemporaryDirectory is discouraged:
See the FileManager method url(for:in:appropriateFor:create:) for the
preferred means of finding the correct temporary directory. For more
information about temporary files, see File System Programming Guide.
So instead, you should use FileManager.default.temporaryDirectory
or, if you want a unique path:
let extractionPath = FileManager.default.temporaryDirectory.appendingPathComponent(UUID().uuidString, isDirectory: true)
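To follow the documented recommendation literally, url(for:in:appropriateFor:create:) with .itemReplacementDirectory hands you a fresh temporary directory on the same volume as a destination URL; a sketch (the Documents destination here is just an example):
let destination = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let tempDir = try FileManager.default.url(for: .itemReplacementDirectory,
                                          in: .userDomainMask,
                                          appropriateFor: destination,
                                          create: true)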
Swift 2.1 version:
func createTempDirectory() -> String? {
    let tempDirURL = NSURL(fileURLWithPath: NSTemporaryDirectory()).URLByAppendingPathComponent("XXXXXX")
    do {
        try NSFileManager.defaultManager().createDirectoryAtURL(tempDirURL, withIntermediateDirectories: true, attributes: nil)
    } catch {
        return nil
    }
    return tempDirURL.absoluteString
}
Swift 3 and up
I think a good way to do this in Swift is with an extension on FileManager. This should create a unique temporary folder and return the URL to you.
extension FileManager {
    func createTemporaryDirectory() throws -> URL {
        let url = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent(UUID().uuidString)
        try createDirectory(at: url, withIntermediateDirectories: true, attributes: nil)
        return url
    }
}
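A call site might then look like this (hypothetical usage):
do {
    let tempDir = try FileManager.default.createTemporaryDirectory()
    print(tempDir.path)
} catch {
    print("Could not create temporary directory: \(error)")
}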
Swift 3 version
func createTempDirectory() -> String? {
    guard let tempDirURL = NSURL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("myTempFile.xxx") else {
        return nil
    }
    do {
        try FileManager.default.createDirectory(at: tempDirURL, withIntermediateDirectories: true, attributes: nil)
    } catch {
        return nil
    }
    return tempDirURL.absoluteString
}
A direct translation of your Objective-C code to Swift would be:
func tempDirectory() -> String! {
    let tempDirectoryTemplate = NSTemporaryDirectory() + "XXXXX"
    var tempDirectoryTemplateCString = tempDirectoryTemplate.fileSystemRepresentation().copy()
    let result: CString = reinterpretCast(mkdtemp(&tempDirectoryTemplateCString))
    if !result {
        return nil
    }
    let fm = NSFileManager.defaultManager()
    let tempDirectoryPath = fm.stringWithFileSystemRepresentation(result, length: Int(strlen(result)))
    return tempDirectoryPath
}
It uses the same mkdtemp() BSD function as your original code. This function creates
a directory name from the template which is guaranteed not to exist at the time
the function is called.
Thanks to Nate Cook who figured out that reinterpretCast() can be used to treat the UnsafePointer<CChar> returned by mkdtemp() as a CString, so that it can be passed to stringWithFileSystemRepresentation(), see Working with C strings in Swift, or: How to convert UnsafePointer<CChar> to CString.
As of Xcode 6 beta 6, the reinterpretCast() is not necessary anymore and the
above code can be simplified to
func tempDirectory() -> String! {
    let tempDirectoryTemplate = NSTemporaryDirectory() + "XXXXX"
    var tempDirectoryTemplateCString = tempDirectoryTemplate.fileSystemRepresentation()
    let result = mkdtemp(&tempDirectoryTemplateCString)
    if result == nil {
        return nil
    }
    let fm = NSFileManager.defaultManager()
    let tempDirectoryPath = fm.stringWithFileSystemRepresentation(result, length: Int(strlen(result)))
    return tempDirectoryPath
}
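For completeness, in current Swift the same mkdtemp() approach can be written without reinterpretCast at all; a sketch, assuming the usual trailing-"XXXXXX" template convention:
import Foundation

func tempDirectory() -> String? {
    let template = NSTemporaryDirectory() + "XXXXXX"
    var cTemplate = Array(template.utf8CString) // mutable, NUL-terminated buffer
    guard mkdtemp(&cTemplate) != nil else { return nil }
    return String(cString: cTemplate) // mkdtemp rewrote the Xs in place
}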
Is it possible to create opaque types not derived from CFTypeRef which can be retained/released with CFRetain/CFRelease? Or how do I derive a new type from a CFType?
I've never done this, but it is possible using private API. In all likelihood it will depend on a specific dot release of OS X, since the CF runtime could change from release to release. In any case, CF is open source, so I took a look at what CFRuntime does. I was happy to see that Apple included an example:
// ========================= EXAMPLE =========================
// Example: EXRange -- a "range" object, which keeps the starting
// location and length of the range. ("EX" as in "EXample").
// ---- API ----
typedef const struct __EXRange * EXRangeRef;
CFTypeID EXRangeGetTypeID(void);
EXRangeRef EXRangeCreate(CFAllocatorRef allocator, uint32_t location, uint32_t length);
uint32_t EXRangeGetLocation(EXRangeRef rangeref);
uint32_t EXRangeGetLength(EXRangeRef rangeref);
// ---- implementation ----
#include <CoreFoundation/CFBase.h>
#include <CoreFoundation/CFString.h>
struct __EXRange {
CFRuntimeBase _base;
uint32_t _location;
uint32_t _length;
};
static Boolean __EXRangeEqual(CFTypeRef cf1, CFTypeRef cf2) {
EXRangeRef rangeref1 = (EXRangeRef)cf1;
EXRangeRef rangeref2 = (EXRangeRef)cf2;
if (rangeref1->_location != rangeref2->_location) return false;
if (rangeref1->_length != rangeref2->_length) return false;
return true;
}
static CFHashCode __EXRangeHash(CFTypeRef cf) {
EXRangeRef rangeref = (EXRangeRef)cf;
return (CFHashCode)(rangeref->_location + rangeref->_length);
}
static CFStringRef __EXRangeCopyFormattingDesc(CFTypeRef cf, CFDictionaryRef formatOpts) {
EXRangeRef rangeref = (EXRangeRef)cf;
return CFStringCreateWithFormat(CFGetAllocator(rangeref), formatOpts,
CFSTR("[%u, %u)"),
rangeref->_location,
rangeref->_location + rangeref->_length);
}
static CFStringRef __EXRangeCopyDebugDesc(CFTypeRef cf) {
EXRangeRef rangeref = (EXRangeRef)cf;
return CFStringCreateWithFormat(CFGetAllocator(rangeref), NULL,
CFSTR("<EXRange %p [%p]>{loc = %u, len = %u}"),
rangeref,
CFGetAllocator(rangeref),
rangeref->_location,
rangeref->_length);
}
static void __EXRangeEXRangeFinalize(CFTypeRef cf) {
EXRangeRef rangeref = (EXRangeRef)cf;
// nothing to finalize
}
static CFTypeID _kEXRangeID = _kCFRuntimeNotATypeID;
static CFRuntimeClass _kEXRangeClass = {0};
/* Something external to this file is assumed to call this
* before the EXRange class is used.
*/
void __EXRangeClassInitialize(void) {
_kEXRangeClass.version = 0;
_kEXRangeClass.className = "EXRange";
_kEXRangeClass.init = NULL;
_kEXRangeClass.copy = NULL;
_kEXRangeClass.finalize = __EXRangeEXRangeFinalize;
_kEXRangeClass.equal = __EXRangeEqual;
_kEXRangeClass.hash = __EXRangeHash;
_kEXRangeClass.copyFormattingDesc = __EXRangeCopyFormattingDesc;
_kEXRangeClass.copyDebugDesc = __EXRangeCopyDebugDesc;
_kEXRangeID = _CFRuntimeRegisterClass((const CFRuntimeClass * const)&_kEXRangeClass);
}
CFTypeID EXRangeGetTypeID(void) {
return _kEXRangeID;
}
EXRangeRef EXRangeCreate(CFAllocatorRef allocator, uint32_t location, uint32_t length) {
struct __EXRange *newrange;
uint32_t extra = sizeof(struct __EXRange) - sizeof(CFRuntimeBase);
newrange = (struct __EXRange *)_CFRuntimeCreateInstance(allocator, _kEXRangeID, extra, NULL);
if (NULL == newrange) {
return NULL;
}
newrange->_location = location;
newrange->_length = length;
return (EXRangeRef)newrange;
}
uint32_t EXRangeGetLocation(EXRangeRef rangeref) {
return rangeref->_location;
}
uint32_t EXRangeGetLength(EXRangeRef rangeref) {
return rangeref->_length;
}
CoreFoundation itself does not provide any such mechanism, but all Cocoa objects work with CFRetain and CFRelease. So the only supported answer is: create a class derived from Foundation's NSObject, and CoreFoundation will recognize it as a CFTypeRef.