AVCodecContext settings for H264 (1080i) - video-capture

I'm trying to configure x264 for 1080i capture. Most of the settings below were collected from different examples, but put together they don't work: the ffmpeg API reports no error, yet avcodec_encode_video() always returns zero.
Some of the numbers look strange to me... for example gop_size. Isn't 250 too high?
Even if you can't offer a definitive answer, I'm still interested in any kind of comment on this subject.
pCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecContext->codec_id = CODEC_ID_H264;
pCodecContext->coder_type = FF_CODER_TYPE_AC;
pCodecContext->flags |= CODEC_FLAG_LOOP_FILTER | CODEC_FLAG_INTERLACED_ME | CODEC_FLAG_INTERLACED_DCT;
pCodecContext->me_cmp |= 1;
pCodecContext->partitions |= X264_PART_I8X8 | X264_PART_I4X4 | X264_PART_P8X8 | X264_PART_B8X8;
pCodecContext->me_method = ME_UMH;
pCodecContext->me_subpel_quality = 8;
pCodecContext->me_range = 16;
pCodecContext->bit_rate = 10 * 1024 * 1024; // 10 Mbps??
pCodecContext->width = 1920;
pCodecContext->height = 1080;
pCodecContext->time_base.num = 1; // 25 fps
pCodecContext->time_base.den = 25; // 25 fps
pCodecContext->gop_size = 250; // 250
pCodecContext->keyint_min = 25;
pCodecContext->scenechange_threshold = 40;
pCodecContext->i_quant_factor = 0.71f;
pCodecContext->b_frame_strategy = 1;
pCodecContext->qcompress = 0.6f;
pCodecContext->qmin = 10;
pCodecContext->qmax = 51;
pCodecContext->max_qdiff = 4;
pCodecContext->max_b_frames = 3;
pCodecContext->refs = 4;
pCodecContext->directpred = 3;
pCodecContext->trellis = 1;
pCodecContext->flags2 |= CODEC_FLAG2_WPRED | CODEC_FLAG2_MIXED_REFS | CODEC_FLAG2_8X8DCT | CODEC_FLAG2_FASTPSKIP; // wpred+mixed_refs+dct8x8+fastpskip
pCodecContext->weighted_p_pred = 2; // not implemented with interlaced ??
pCodecContext->crf = 22;
pCodecContext->pix_fmt = PIX_FMT_YUV420P;
pCodecContext->thread_count = 0;

You could analyze some existing 1080i H.264 video files to see their parameters.
I found these links useful:
http://www.cardinalpeak.com/blog/?p=878
http://sourceforge.net/projects/h264bitstream/

You should strive to avoid setting any x264 options yourself; the library itself knows best and you'll only get poor tunings from reading old source code. Use the AVOption API to set the "preset"/"tune"/"profile" options on the encoder to what you need (see x264 --help).
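For example, with the AVOption API that could look roughly like this (a sketch only, assuming a libavcodec recent enough to expose the x264 private options; the exact preset/profile/crf values are just placeholders):
#include <libavutil/opt.h>
// Keep the context minimal and let libx264 derive the rest from the preset.
pCodecContext->width = 1920;
pCodecContext->height = 1080;
pCodecContext->time_base = (AVRational){1, 25};
pCodecContext->pix_fmt = PIX_FMT_YUV420P;
pCodecContext->flags |= CODEC_FLAG_INTERLACED_ME | CODEC_FLAG_INTERLACED_DCT; // keep the interlacing flags for 1080i
av_opt_set(pCodecContext->priv_data, "preset", "medium", 0); // x264 private options live in priv_data
av_opt_set(pCodecContext->priv_data, "profile", "high", 0);
av_opt_set(pCodecContext->priv_data, "crf", "22", 0);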

PIC32MZ UART RX Interrupt

I'm porting from PIC32MX to MZ (PIC32MZ2048EFG100) and am banging my head against the wall trying to generate a simple RX interrupt on UART3. Please see the code below.
void main(void) {
__builtin_disable_interrupts();
//Ensure all pins are configured as digital
ANSELA = 0x0000;
ANSELB = 0x0000;
ANSELC = 0x0000;
ANSELD = 0x0000;
ANSELE = 0x0000;
ANSELF = 0x0000;
ANSELG = 0x0000;
//Convenient macros to do IOUNLOCK
#define PPSUnLock() {SYSKEY=0x0;SYSKEY=0xAA996655;SYSKEY=0x556699AA;CFGCONbits.IOLOCK=0;}
#define PPSLock() {SYSKEY=0x0;SYSKEY=0xAA996655;SYSKEY=0x556699AA;CFGCONbits.IOLOCK=1;}
//Peripheral Pin Select (PPS) Settings for UART3
PPSUnLock();
U3RXRbits.U3RXR = 0b1010;
RPA14Rbits.RPA14R = 0b0001;
PPSLock();
//Config UART3
U3MODEbits.UEN0 = 0; //no flow control
U3MODEbits.UEN1 = 0;
U3MODEbits.LPBACK = 0; // no loopback
U3MODEbits.ABAUD = 0; //no autobaud
U3MODEbits.BRGH = 0;
U3MODEbits.PDSEL0 = 0; //8 data bits, no parity
U3MODEbits.PDSEL1 = 0;
U3MODEbits.STSEL = 0; // 1 stop bit
U3STAbits.URXISEL0 = 0; //RX Interrupt on first byte in FIFO
U3STAbits.URXISEL1 = 0;
U3BRG = CLOSEST_UBRG_VALUE115200; //Macro defined elsewhere, but it works
//Int priorities
IPC39bits.U3EIP = 6;
IPC39bits.U3EIS = 3;
//Int flags
IFS4bits.U3RXIF = 0;
//Int enable/disable
IEC4bits.U3EIE = 0;
IEC4bits.U3TXIE = 0;
IEC4bits.U3RXIE = 1; //Enable int on RX
//Enable multi-vector interrupts
INTCONSET = _INTCON_MVEC_MASK;
__builtin_enable_interrupts();
//Turn on UART3
U3STAbits.URXEN = 1;
U3STAbits.UTXEN = 1;
U3MODEbits.ON = 1;
UART_txEXTCOMandWait('A'); //Function defined elsewhere - the byte "A" is successfully sent to my terminal, so TX works.
while(1){Nop();}; //wait in endless loop for interrupt to occur on keystroke
}
Below is my ISR:
void __ISR_AT_VECTOR (_UART3_RX_VECTOR, IPL6SOFT) U3Interrupt(void) {
unsigned int test=0;
Nop(); //Setting a breakpoint here
}
I successfully see an "A" on my terminal screen when running the program, so the settings are correct for TX. Typing text in my terminal yields no interrupts, and I have verified with a scope that the signal is making it to the PIC32.
What am I missing here? I'm burning a ton of time on something that should be trivial.
Thanks guys.
Figured it out... I was setting the fault interrupt priority instead of the RX interrupt priority. It should be:
//Int priorities
IPC39bits.U3RXIP = 6;
IPC39bits.U3RXIS = 3;
Interesting how this had been working in production on PIC32MX for years.
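As a side note, the ISR shown above never reads the received byte or clears the interrupt flag, so once it fires it will keep re-entering. A minimal handler might look like this (a sketch only; what you do with the byte is up to you):
void __ISR_AT_VECTOR (_UART3_RX_VECTOR, IPL6SOFT) U3Interrupt(void) {
//Drain the RX FIFO first
while (U3STAbits.URXDA) {
unsigned char c = U3RXREG; //do something useful with c here
(void)c;
}
//Clear the flag after reading, otherwise the interrupt stays pending
IFS4bits.U3RXIF = 0;
}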

generating of mipmaps using vkCmdBlitImage for cubemap textures

What should the VkImageBlit.srcOffsets and VkImageBlit.dstOffsets parameters be when generating mipmaps dynamically?
I am going layer by layer and mip level by mip level, but somewhere it goes wrong, most likely in the offsets. My source data contains all six faces at mip level 0.
for(int j=0; j< bufferCopyRegions.size(); j++) {
for (int32_t i = 1; i < mipLevels; i++)
{
VkImageBlit imageBlit{};
// Source
imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.srcSubresource.layerCount = 1;
imageBlit.srcSubresource.mipLevel = 0;
imageBlit.srcOffsets[1].x = bitmapInfos[j].width;
imageBlit.srcOffsets[1].y = bitmapInfos[j].height;
imageBlit.srcOffsets[1].z = 1;
// Destination
imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.dstSubresource.layerCount = 1;
imageBlit.dstSubresource.mipLevel = i;
imageBlit.dstOffsets[1].x = int32_t(bitmapInfos[j].width >> (i) == 0 ? 1 : int32_t(bitmapInfos[j].width >> (i )));
imageBlit.dstOffsets[1].y = int32_t(bitmapInfos[j].height >> (i) == 0 ? 1 : int32_t(bitmapInfos[j].height >> (i)));
imageBlit.dstOffsets[1].z = 1;
VkImageMemoryBarrier imageMemoryBarrier = {};
imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrier.pNext = NULL;
imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageMemoryBarrier.subresourceRange.baseMipLevel = i;
imageMemoryBarrier.subresourceRange.levelCount = 1;
imageMemoryBarrier.subresourceRange.baseArrayLayer = j;
imageMemoryBarrier.subresourceRange.layerCount = 1;
// change layout of current mip level to transfer dest
setImageLayout(imageMemoryBarrier,
blitCmd,
image,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, imageMemoryBarrier.subresourceRange,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_HOST_BIT);
// Do blit operation from previous mip level
vkCmdBlitImage(blitCmd, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageBlit, VK_FILTER_LINEAR);
setImageLayout(imageMemoryBarrier, blitCmd, image, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageMemoryBarrier.subresourceRange,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
}
}
I don't see baseArrayLayer of imageBlit.srcSubresource and imageBlit.dstSubresource being set to j, which is probably your immediate problem.
Your barriers also look wrong to me. Only the top mip level needs to be synchronized with the host, and even then VK_PIPELINE_STAGE_HOST_BIT should not be necessary, because vkQueueSubmit performs that kind of synchronization implicitly as long as the host writes finished before it is called (see 6.9 "Host Write Ordering Guarantees" and the Note in 6.1.3 "Access Types").
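For illustration, the blit for face j and mip level i could be filled in roughly like this (a sketch reusing the names from your code; it assumes the image has six array layers and that mip 0 of the same face is the blit source):
VkImageBlit imageBlit = {0};
// Source: face j, mip 0
imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.srcSubresource.mipLevel = 0;
imageBlit.srcSubresource.baseArrayLayer = j; // the missing piece
imageBlit.srcSubresource.layerCount = 1;
imageBlit.srcOffsets[1].x = (int32_t)bitmapInfos[j].width;
imageBlit.srcOffsets[1].y = (int32_t)bitmapInfos[j].height;
imageBlit.srcOffsets[1].z = 1;
// Destination: face j, mip i, clamped to at least one texel
imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.dstSubresource.mipLevel = i;
imageBlit.dstSubresource.baseArrayLayer = j; // same face on the destination side
imageBlit.dstSubresource.layerCount = 1;
imageBlit.dstOffsets[1].x = (int32_t)((bitmapInfos[j].width >> i) ? (bitmapInfos[j].width >> i) : 1);
imageBlit.dstOffsets[1].y = (int32_t)((bitmapInfos[j].height >> i) ? (bitmapInfos[j].height >> i) : 1);
imageBlit.dstOffsets[1].z = 1;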

ROBOTC - Programming Autonomous With Integrated Encoders

I have an X-drive coded in ROBOTC. My team and I already have the integrated motor encoders on the robot (for the autonomous period), but the code to run them is incorrect. The current autonomous code is below. When I run it, the robot just drives forward forever, at varying speeds.
I have looked at multiple tutorials, but none of them work. Does anyone have code to make the motors (393 motors) drive until the encoders reach a count of 720?
#pragma config(I2C_Usage, I2C1, i2cSensors)
#pragma config(Sensor, I2C_1, sensorQuadEncoderOnI2CPort, AutoAssign)
#pragma config(Motor, port2, FL, tmotorVex393_MC29, PIDControl, encoderPort, I2C_1)
#pragma config(Motor, port3, BR, tmotorVex393_MC29, PIDControl, reversed, encoderPort, I2C_1)
#pragma config(Motor, port8, BL, tmotorVex393_MC29, PIDControl, encoderPort, I2C_1)
#pragma config(Motor, port9, FR, tmotorVex393_MC29, PIDControl, reversed, encoderPort, I2C_1)
//*!!Code automatically generated by 'ROBOTC' configuration wizard !!*//
task main()
{
// Autonomous with Integrated Encoders
nMotorPIDSpeedCtrl[FL] = mtrSpeedReg;
nMotorPIDSpeedCtrl[FR] = mtrSpeedReg;
nMotorPIDSpeedCtrl[BL] = mtrSpeedReg;
nMotorPIDSpeedCtrl[BR] = mtrSpeedReg;
//Clears motor values
nMotorEncoder[FL] = 0;
nMotorEncoder[FR] = 0;
nMotorEncoder[BL] = 0;
nMotorEncoder[BR] = 0;
//Forward
motor[FL] = 63;
motor[FR] = 63;
motor[BL] = 63;
motor[BR] = 63;
while(nMotorEncoder[FL] < 720) {
}
//Clears motor values
nMotorEncoder[FL] = 0;
nMotorEncoder[FR] = 0;
nMotorEncoder[BL] = 0;
nMotorEncoder[BR] = 0;
}
You need to explicitly stop the motors (not just zero out the encoders) after the while loop. Otherwise the robot doesn't know to stop; it just knows that it passed the encoder target.
So this code should work for you:
//Clears motor values
nMotorEncoder[FL] = 0;
nMotorEncoder[FR] = 0;
nMotorEncoder[BL] = 0;
nMotorEncoder[BR] = 0;
motor[FL] = 63;
motor[FR] = 63;
motor[BL] = 63;
motor[BR] = 63;
//Forward
while(nMotorEncoder[FL] < 720) {
}
//stops motors
motor[FL] = 0;
motor[FR] = 0;
motor[BL] = 0;
motor[BR] = 0;
//Clears motor encoder values
nMotorEncoder[FL] = 0;
nMotorEncoder[FR] = 0;
nMotorEncoder[BL] = 0;
nMotorEncoder[BR] = 0;

UART RX not working on PIC16F1704

I have got UART TX working on one PIC but cannot get UART RX working on another PIC. My plan is to have the first PIC send data to the second PIC.
My initialisation code for the first PIC (TX) is:
void configure_TX_port(){
/*Port configurations*/
OSCCON = 0X68;
//Push button
TRISC3 = 1;
INLVLC3 = 0;
ANSC3 = 0;
//Led output
TRISC2 = 0;
//TX output
TRISA2 = 0;
ANSA2 = 0;
/*PPS setup for RA2*/
PPSLOCK = 0x55;
PPSLOCK = 0xAA;
PPSLOCK = 0;
RA2PPS = 0x14;
PPSLOCK = 0x55;
PPSLOCK = 0xAA;
PPSLOCK = 1;
/*UART configuration*/
TXEN = 1;
SYNC = 0;
SPEN = 1;
TXSTA = (0x4|0x20);
SPBRG = (int)(4000000L/(16UL * 9600) -1);
}
My TX send-data code is:
void putch(unsigned char byte) {
/* output one byte */
while (!TXIF) /* TXIF is set when the register is empty */
continue;
TXREG = byte;
}
My initialisation code for the second PIC (RX) is:
void configure_RX_port(){
/*Port configurations*/
OSCCON = 0X68;
//Led output
TRISC3 = 0;
//RX input
TRISC5 = 1;
ANSC5 = 0;
/*UART configuration*/
CREN = 1;
SYNC = 0;
SPEN = 1;
TXSTA = (0x4|0x20);
RCSTA = 0x90;
SPBRG = (int)(4000000L/(16UL * 9600) -1);
}
My receive-data code is:
unsigned char getch(void) {
/* retrieve one byte */
unsigned char ret;
while (!RCIF) { /* set when register is not empty */
}
ret = RCREG;
return ret;
}
When I debug the code, the getch function blocks waiting for a character even though the other PIC is sending data. On this PIC, RC5 is a designated RX pin, so I don't think I have to do any PPS configuration.
Rahul
TX1STA = 0b00100100; This enables TX (TXEN = 1) and the high baud rate (BRGH = 1).
RC1STA = 0b10000000; This enables the serial port (SPEN = 1).
The only important missing parts are your clock setting and the baud rate you want. I saw 4000000 in the formula, meaning 4 MHz, divided by 9600, so I assume 9600 baud.
Result = 25 (0x19).
SPBRGL = 25;
SPBRGH = 0;
This way, your TX should work. Your TX function is good.
Be sure to configure the RX and TX pins as DIGITAL by clearing the corresponding bits in ANSELA and ANSELC (the PIC16F1704 has no PORTB).
Your PIC also uses PPS, so be sure to configure it the right way.
EDIT: receive configuration
The only difference needed to get a working receiver is to enable the continuous receive bit, CREN.
RC1STA = 0b10010000; //Enable serial port (SPEN) and continuous receive (CREN).
Be sure to set the RX pin (RC5 in your case) as an INPUT (TRISC5 = 1) so that it can read incoming data. You should also consider using an interrupt routine instead of polling the receive flag bit; that way you're sure not to miss any incoming data.
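For example, an interrupt-driven receive could look roughly like this (a sketch only, using XC8's __interrupt() syntax; the handler name and the lastByte variable are made up):
/* during initialisation */
RCIE = 1; /* UART receive interrupt */
PEIE = 1; /* peripheral interrupts */
GIE = 1; /* global interrupts */
/* interrupt service routine */
volatile unsigned char lastByte;
void __interrupt() isr(void) {
if (RCIE && RCIF) { /* RCIF is cleared by reading RCREG */
if (OERR) { /* clear an overrun, or reception stops */
CREN = 0;
CREN = 1;
}
lastByte = RCREG;
}
}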
By default, at reset all pins on the PIC16F1704 are set as analog.
So clear the corresponding bits for the RX and TX pins in ANSELA and ANSELC to set them as digital.
You look to be using asynchronous mode (SYNC = 0) but you do not set TXEN = 1.
Setting CREN = 1 only overrides TXEN in synchronous mode, so try setting TXEN = 1.
I added the following line, TXSTA = (0x4|0x20);, to the receiver PIC code and it works now. There is no need for
CREN = 1;
SYNC = 0;
SPEN = 1;
as RCSTA = 0x90 and TXSTA already set the same bits.
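For reference, the working receiver initialisation then boils down to something like this (a sketch assuming the 4 MHz clock and 9600 baud used above):
void configure_RX_port(void) {
OSCCON = 0X68; /* 4 MHz internal oscillator */
TRISC3 = 0; /* LED output */
TRISC5 = 1; /* RC5 as input (UART RX) */
ANSC5 = 0; /* RC5 digital */
TXSTA = 0x24; /* BRGH = 1 (and TXEN = 1) */
RCSTA = 0x90; /* SPEN = 1, CREN = 1 */
SPBRGL = 25; /* 4000000 / (16 * 9600) - 1 */
SPBRGH = 0;
}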

ExtAudioFileRead gives different result in iOS 5

I'm trying to extract linear PCM data from an MP3 file.
Before iOS 5, I could do it successfully using the AudioToolbox framework, specifically the ExtAudioFileRead function.
However, in iOS 5 the ExtAudioFileRead function gives completely different results from those in iOS 4.
First, it cannot read all the packets in the source MP3 file.
For example, it reads only 1637 packets while the source MP3 file has 2212 packets in total.
Second, the PCM values obtained from the function are completely different from those obtained in iOS 4.
I can't figure out what I did wrong :(
The same framework, the same function, and the same code... but completely different results?
I suspect it is a bug in iOS 5, so I have already reported the problem to Apple.
But Apple has not answered my bug report for two weeks!
Here's the code that causes the problem.
After executing the code, I expect to have the correct PCM data in pcmBuffer.
In iOS 4, the code gives the result I expect.
But in iOS 5, the result is completely different and wrong.
Please, somebody help me!
OSStatus status;
ExtAudioFileRef fileRef;
CFURLRef fileURL = (CFURLRef)[NSURL fileURLWithPath:filePath];
status = ExtAudioFileOpenURL((CFURLRef)fileURL, &fileRef);
AudioStreamBasicDescription dataFormat;
dataFormat.mSampleRate = SAMPLE_RATE;
dataFormat.mFormatID = kAudioFormatLinearPCM;
dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
dataFormat.mFramesPerPacket = 1;
dataFormat.mChannelsPerFrame = 1;
dataFormat.mBitsPerChannel = 16;
dataFormat.mBytesPerPacket = 2;
dataFormat.mBytesPerFrame = 2;
UInt32 propDataSize;
AudioStreamBasicDescription originalDataFormat;
propDataSize = (UInt32)sizeof(originalDataFormat);
status = ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileDataFormat, &propDataSize, &originalDataFormat);
SInt64 numPackets;
propDataSize = sizeof(numPackets);
status = ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileLengthFrames, &propDataSize, &numPackets);
propDataSize = (UInt32)sizeof(dataFormat);
status = ExtAudioFileSetProperty(fileRef, kExtAudioFileProperty_ClientDataFormat, propDataSize, &dataFormat);
numPackets = (SInt64)numPackets / (SInt64)(originalDataFormat.mSampleRate / SAMPLE_RATE);
size_t bufferSize = (size_t)(numPackets * sizeof(SInt16));
SInt16 *pcmBuffer = (SInt16 *)malloc(bufferSize);
AudioBufferList bufList;
bufList.mNumberBuffers = 1;
bufList.mBuffers[0].mNumberChannels = 1;
bufList.mBuffers[0].mDataByteSize = bufferSize;
bufList.mBuffers[0].mData = pcmBuffer;
ExtAudioFileSeek(fileRef, 0);
UInt32 totalFramesRead = 0;
do {
UInt32 framesRead = numPackets - totalFramesRead;
bufList.mBuffers[0].mData = pcmBuffer + (totalFramesRead * (sizeof(SInt16)));
ExtAudioFileRead(fileRef, &framesRead, &bufList);
totalFramesRead += framesRead;
if(framesRead == 0) {
break;
}
NSLog(@"read %lu frames\n", framesRead);
} while (totalFramesRead < numPackets);
int totalPackets = totalFramesRead;
status = ExtAudioFileDispose(fileRef);
NSLog(@"numPackets : %lld, totalPackets : %d", numPackets, totalPackets);
Ouch. I noticed that the numbers are different when the original sampling rate of the song is different. Back to square one.