I have to encode a .wav file and write it into the same file, or another file, using the ffmpeg library. Here is my code for encoding:
-(void)audioencode:(const char *)fileName
{
AVFrame *frame;
AVPacket pkt;
int i, j, k, ret, got_output;
int buffer_size;
FILE *f;
uint16_t *samples;
const char *format_name = "wav";
const char *file_url = "/Users/xxxx/Downloads/simple-drum-beat.wav";
avcodec_register_all();
av_register_all();
AVOutputFormat *format = NULL;
for (AVOutputFormat *formatIter = av_oformat_next(NULL); formatIter != NULL; formatIter = av_oformat_next(formatIter))
{
int hasEncoder = NULL != avcodec_find_encoder(formatIter->audio_codec);
if (0 == strcmp(format_name, formatIter->name)) {
format = formatIter;
break;
}
}
AVCodec *codec = avcodec_find_encoder(format->audio_codec);
NSLog(#"tet test tststs");
AVCodecContext *c;
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
c->sample_fmt = AV_SAMPLE_FMT_S16;
if (!check_sample_fmt(codec, c->sample_fmt)) {
fprintf(stderr, "Encoder does not support sample format %s",
av_get_sample_fmt_name(c->sample_fmt));
exit(1);
}
c->bit_rate = 64000;//705600;
c->sample_rate = select_sample_rate(codec);
c->channel_layout = select_channel_layout(codec);
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
c->frame_size = av_get_audio_frame_duration(c, 16);
int bits_per_sample = av_get_bits_per_sample(c->codec_id);
int frameSize = av_get_audio_frame_duration(c,16);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(fileName, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", fileName);
exit(1);
}
/* frame containing input raw audio */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
frame->nb_samples = frameSize/*c->frame_size*/;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
buffer_size = av_samples_get_buffer_size(NULL, c->channels,frameSize /*c->frame_size*/,
c->sample_fmt, 0);
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "Could not setup audio frame\n");
exit(1);
}
float t, tincr;
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for(i=0;i<800;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
for (j = 0; j < frameSize/*c->frame_size*/; j++) {
samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j];
t += tincr;
}
/* encode the samples */
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
}
But after encoding, the file size is zero.
Please suggest what I am doing wrong; any help will be appreciated. Thanks in advance.
This is the original code:
#include <stdio.h>
#include <string.h>
#include <assert.h>
#if defined(_WIN32) && !defined(__CYGWIN__)
#include <windows.h>
#else
#include <sys/select.h>
#endif
#include <sphinxbase/err.h>
#include <sphinxbase/ad.h>
#include "pocketsphinx.h"
static const arg_t cont_args_def[] = {
POCKETSPHINX_OPTIONS,
/* Argument file. */
{"-argfile",
ARG_STRING,
NULL,
"Argument file giving extra arguments."},
{"-adcdev",
ARG_STRING,
NULL,
"Name of audio device to use for input."},
{"-infile",
ARG_STRING,
NULL,
"Audio file to transcribe."},
{"-inmic",
ARG_BOOLEAN,
"no",
"Transcribe audio from microphone."},
{"-time",
ARG_BOOLEAN,
"no",
"Print word times in file transcription."},
CMDLN_EMPTY_OPTION
};
static ps_decoder_t *ps;
static cmd_ln_t *config;
static FILE *rawfd;
static void
print_word_times()
{
int frame_rate = cmd_ln_int32_r(config, "-frate");
ps_seg_t *iter = ps_seg_iter(ps);
while (iter != NULL) {
int32 sf, ef, pprob;
float conf;
ps_seg_frames(iter, &sf, &ef);
pprob = ps_seg_prob(iter, NULL, NULL, NULL);
conf = logmath_exp(ps_get_logmath(ps), pprob);
printf("%s %.3f %.3f %f\n", ps_seg_word(iter), ((float)sf / frame_rate),
((float) ef / frame_rate), conf);
iter = ps_seg_next(iter);
}
}
static int
check_wav_header(char *header, int expected_sr)
{
int sr;
if (header[34] != 0x10) {
E_ERROR("Input audio file has [%d] bits per sample instead of 16\n", header[34]);
return 0;
}
if (header[20] != 0x1) {
E_ERROR("Input audio file has compression [%d] and not required PCM\n", header[20]);
return 0;
}
if (header[22] != 0x1) {
E_ERROR("Input audio file has [%d] channels, expected single channel mono\n", header[22]);
return 0;
}
sr = ((header[24] & 0xFF) | ((header[25] & 0xFF) << 8) | ((header[26] & 0xFF) << 16) | ((header[27] & 0xFF) << 24));
if (sr != expected_sr) {
E_ERROR("Input audio file has sample rate [%d], but decoder expects [%d]\n", sr, expected_sr);
return 0;
}
return 1;
}
/*
* Continuous recognition from a file
*/
static void
recognize_from_file()
{
int16 adbuf[2048];
const char *fname;
const char *hyp;
int32 k;
uint8 utt_started, in_speech;
int32 print_times = cmd_ln_boolean_r(config, "-time");
fname = cmd_ln_str_r(config, "-infile");
if ((rawfd = fopen(fname, "rb")) == NULL) {
E_FATAL_SYSTEM("Failed to open file '%s' for reading",
fname);
}
if (strlen(fname) > 4 && strcmp(fname + strlen(fname) - 4, ".wav") == 0) {
char waveheader[44];
fread(waveheader, 1, 44, rawfd);
if (!check_wav_header(waveheader, (int)cmd_ln_float32_r(config, "-samprate")))
E_FATAL("Failed to process file '%s' due to format mismatch.\n", fname);
}
if (strlen(fname) > 4 && strcmp(fname + strlen(fname) - 4, ".mp3") == 0) {
E_FATAL("Can not decode mp3 files, convert input file to WAV 16kHz 16-bit mono before decoding.\n");
}
ps_start_utt(ps);
utt_started = FALSE;
while ((k = fread(adbuf, sizeof(int16), 2048, rawfd)) > 0) {
ps_process_raw(ps, adbuf, k, FALSE, FALSE);
in_speech = ps_get_in_speech(ps);
if (in_speech && !utt_started) {
utt_started = TRUE;
}
if (!in_speech && utt_started) {
ps_end_utt(ps);
hyp = ps_get_hyp(ps, NULL);
if (hyp != NULL)
printf("%s\n", hyp);
if (print_times)
print_word_times();
fflush(stdout);
ps_start_utt(ps);
utt_started = FALSE;
}
}
ps_end_utt(ps);
if (utt_started) {
hyp = ps_get_hyp(ps, NULL);
if (hyp != NULL) {
printf("%s\n", hyp);
if (print_times) {
print_word_times();
}
}
}
fclose(rawfd);
}
/* Sleep for specified msec */
static void
sleep_msec(int32 ms)
{
#if (defined(_WIN32) && !defined(GNUWINCE)) || defined(_WIN32_WCE)
Sleep(ms);
#else
/* ------------------- Unix ------------------ */
struct timeval tmo;
tmo.tv_sec = 0;
tmo.tv_usec = ms * 1000;
select(0, NULL, NULL, NULL, &tmo);
#endif
}
/*
* Main utterance processing loop:
* for (;;) {
* start utterance and wait for speech to process
* decoding till end-of-utterance silence will be detected
* print utterance result;
* }
*/
static void
recognize_from_microphone()
{
ad_rec_t *ad;
int16 adbuf[2048];
uint8 utt_started, in_speech;
int32 k;
char const *hyp;
if ((ad = ad_open_dev(cmd_ln_str_r(config, "-adcdev"),
(int) cmd_ln_float32_r(config,
"-samprate"))) == NULL)
E_FATAL("Failed to open audio device\n");
if (ad_start_rec(ad) < 0)
E_FATAL("Failed to start recording\n");
if (ps_start_utt(ps) < 0)
E_FATAL("Failed to start utterance\n");
utt_started = FALSE;
E_INFO("Ready....\n");
for (;;) {
if ((k = ad_read(ad, adbuf, 2048)) < 0)
E_FATAL("Failed to read audio\n");
ps_process_raw(ps, adbuf, k, FALSE, FALSE);
in_speech = ps_get_in_speech(ps);
if (in_speech && !utt_started) {
utt_started = TRUE;
E_INFO("Listening...\n");
}
if (!in_speech && utt_started) {
/* speech -> silence transition, time to start new utterance */
ps_end_utt(ps);
hyp = ps_get_hyp(ps, NULL );
if (hyp != NULL) {
printf("%s\n", hyp);
fflush(stdout);
}
if (ps_start_utt(ps) < 0)
E_FATAL("Failed to start utterance\n");
utt_started = FALSE;
E_INFO("Ready....\n");
}
sleep_msec(100);
}
ad_close(ad);
}
int
main(int argc, char *argv[])
{
char const *cfg;
config = cmd_ln_parse_r(NULL, cont_args_def, argc, argv, TRUE);
/* Handle argument file as -argfile. */
if (config && (cfg = cmd_ln_str_r(config, "-argfile")) != NULL) {
config = cmd_ln_parse_file_r(config, cont_args_def, cfg, FALSE);
}
if (config == NULL || (cmd_ln_str_r(config, "-infile") == NULL && cmd_ln_boolean_r(config, "-inmic") == FALSE)) {
E_INFO("Specify '-infile <file.wav>' to recognize from file or '-inmic yes' to recognize from microphone.\n");
cmd_ln_free_r(config);
return 1;
}
ps_default_search_args(config);
ps = ps_init(config);
if (ps == NULL) {
cmd_ln_free_r(config);
return 1;
}
E_INFO("%s COMPILED ON: %s, AT: %s\n\n", argv[0], __DATE__, __TIME__);
if (cmd_ln_str_r(config, "-infile") != NULL) {
recognize_from_file();
} else if (cmd_ln_boolean_r(config, "-inmic")) {
recognize_from_microphone();
}
ps_free(ps);
cmd_ln_free_r(config);
return 0;
}
#if defined(_WIN32_WCE)
#pragma comment(linker,"/entry:mainWCRTStartup")
#include <windows.h>
//Windows Mobile has the Unicode main only
int
wmain(int32 argc, wchar_t * wargv[])
{
char **argv;
size_t wlen;
size_t len;
int i;
argv = malloc(argc * sizeof(char *));
for (i = 0; i < argc; i++) {
wlen = lstrlenW(wargv[i]);
len = wcstombs(NULL, wargv[i], wlen);
argv[i] = malloc(len + 1);
wcstombs(argv[i], wargv[i], wlen);
}
//assuming ASCII parameters
return main(argc, argv);
}
#endif
I can compile it by this command:
g++ -o output continuous.cpp -DMODELDIR=\"`pkg-config --variable=modeldir pocketsphinx`\" `pkg-config --cflags --libs pocketsphinx sphinxbase`
And run it with this command: output -inmic yes
But I would like to change the code so that it does not need "-inmic yes" and automatically starts recognition from the microphone. However, I get a segmentation fault (core dumped) error when I change these parts:
static const arg_t cont_args_def= {"-inmic",
ARG_BOOLEAN,
"no",
"Transcribe audio from microphone."};
int main(int argc, char *argv[])
{
config = cmd_ln_parse_r(NULL, cont_args_def, argc, argv, TRUE);
if (cmd_ln_boolean_r(config, "-inmic")) {
recognize_from_microphone();
}
// recognize_from_microphone();
ps_free(ps);
cmd_ln_free_r(config);
return 0;
}
I searched a lot and read the documentation, but I couldn't figure out what the problem is.
Change the last argument passed to cmd_ln_parse_r from TRUE to FALSE.
It has something to do with strict checking.
I figured this out by reading the source code for cmd_ln.c in the sphinxbase code.
I also changed the boolean value for -inmic in cont_args_def from "no" to "yes".
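For reference, here is a minimal, untested sketch of how those two changes could look in the stripped-down main, based on the answer above and on the original continuous.c; it keeps POCKETSPHINX_OPTIONS, the CMDLN_EMPTY_OPTION terminator, and the ps_init call from the original code:
static const arg_t cont_args_def[] = {
    POCKETSPHINX_OPTIONS,
    {"-inmic",
     ARG_BOOLEAN,
     "yes",               /* default changed from "no" to "yes" */
     "Transcribe audio from microphone."},
    CMDLN_EMPTY_OPTION    /* terminator the shortened table must keep */
};
int
main(int argc, char *argv[])
{
    /* FALSE = no strict checking, as suggested in the answer */
    config = cmd_ln_parse_r(NULL, cont_args_def, argc, argv, FALSE);
    if (config == NULL)
        return 1;
    ps_default_search_args(config);
    ps = ps_init(config);     /* without this, ps stays NULL and the decoder calls crash */
    if (ps == NULL) {
        cmd_ln_free_r(config);
        return 1;
    }
    if (cmd_ln_boolean_r(config, "-inmic"))
        recognize_from_microphone();
    ps_free(ps);
    cmd_ln_free_r(config);
    return 0;
}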
How to decode a .wav file and write it into another file using ffmpeg?
I got the decoded data with this piece of code:
-(void)audioDecode:(const char *)outfilename inFileName:(const char *)filename
{
const char* input_filename=filename;
//avcodec_register_all();
av_register_all();
avcodec_register_all();
// av_register_all();
//av_ini
// AVFormatContext* container=avformat_alloc_context();
// if(avformat_open_input(&container,filename,NULL,NULL)<0){
// NSLog(#"Could not open file");
// }
AVCodec *codec;
AVCodecContext *c= NULL;
int len;
FILE *f, *outfile;
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
AVFrame *decoded_frame = NULL;
av_init_packet(&avpkt);
printf("Decode audio file %s to %s\n", filename, outfilename);
avcodec_register_all();
AVFrame* frame = av_frame_alloc();
if (!frame)
{
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
AVFormatContext* formatContext = NULL;
/* Opening the file, and check if it has opened */
if (avformat_open_input(&formatContext, filename, NULL, NULL) != 0)
{
av_frame_free(&frame);
NSLog(#"Could not open file");
}
if (avformat_find_stream_info(formatContext, NULL) < 0)
{
av_frame_free(&frame);
avformat_close_input(&formatContext);
NSLog(#"Error finding the stream info");
}
outfile = fopen(outfilename, "wb");
if (!outfile) {
av_free(c);
exit(1);
}
/* Find the audio Stream, if no audio stream are found, clean and exit */
AVCodec* cdc = NULL;
int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &cdc, 0);
if (streamIndex < 0)
{
av_frame_free(&frame);
avformat_close_input(&formatContext);
NSLog(#"Could not find any audio stream in the file");
exit(1);
}
/* Open the audio stream to read data in audioStream */
AVStream* audioStream = formatContext->streams[streamIndex];
/* Initialize the codec context */
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = cdc;
/* Open the codec, and verify if it has opened */
if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
{
av_frame_free(&frame);
avformat_close_input(&formatContext);
NSLog(#"Couldn't open the context with the decoder");
exit(1);
}
/* Initialize buffer to store compressed packets */
AVPacket readingPacket;
av_init_packet(&readingPacket);
int length = 1;
long long int chunk = ((formatContext->bit_rate)*length/8);
int j=0;
int count = 0;
//audioChunk output;
while(av_read_frame(formatContext, &readingPacket)==0){
//if((count+readingPacket.size)>start){
if(readingPacket.stream_index == audioStream->index){
AVPacket decodingPacket = readingPacket;
// Audio packets can have multiple audio frames in a single packet
while (decodingPacket.size > 0){
// Try to decode the packet into a frame
// Some frames rely on multiple packets, so we have to make sure the frame is finished before
// we can use it
int gotFrame = 0;
int result = avcodec_decode_audio4(codecContext, frame, &gotFrame, &decodingPacket);
count += result;
if (result >= 0 && gotFrame)
{
decodingPacket.size -= result;
decodingPacket.data += result;
int a;
for(int i=0;i<result-1;i++){
fwrite(frame->data[0], 1, decodingPacket.size, outfile);
// *(output.data+j)=frame->data[0][i];
//
j++;
if(j>=chunk) break;
}
// We now have a fully decoded audio frame
}
else
{
decodingPacket.size = 0;
decodingPacket.data = NULL;
}
// if(j>=chunk) break;
}
}
// }else count+=readingPacket.size;
//
// // To prevent memory leak, must free packet.
// av_free_packet(&readingPacket);
// if(j>=chunk) break;
}
fclose(outfile);
}
But the file is created with zero bytes. I don't know what's wrong with this code. And I got one more error: "Format adp detected only with low score of 25, misdetection possible!"
I am retrieving USB mass storage related information using the libusb library, but I don't know how to get the USB mass storage size.
My attempt is:
void printdev(libusb_device *dev);
int main()
{
libusb_device **devs;
libusb_context *ctx = NULL; //a libusb session
int r;
ssize_t cnt; //holding number of devices in list
r = libusb_init(&ctx); //initialize a library session
if(r < 0)
{
cout<<"Init Error "<<r<<endl; //there was an error
return 1;
}
libusb_set_debug(ctx, 3); //set verbosity level to 3, as suggested in the documentation
cnt = libusb_get_device_list(ctx, &devs); //get the list of devices
if(cnt < 0)
{
cout<<"Get Device Error"<<endl; //there was an error
}
cout<<cnt<<" Devices in list."<<endl; //print total number of usb devices
int i;
for(i = 0; i < cnt; i++)
{
printdev(devs[i]);
}
libusb_free_device_list(devs, 1); //free the list, unref the devices in it
libusb_exit(ctx); //close the session
return 0;
}
void printdev(libusb_device *dev)
{
libusb_device_descriptor desc;
libusb_config_descriptor *conDesc;
char szBuffer[256] = {0};
unsigned char strDesc[256];
libusb_device_handle *devHandle = NULL;
int retVal;
__int64 i64Temp;
DWORD dwProdId;
DWORD dwProdId1;
i64Temp = 13888;
dwProdId = (DWORD)i64Temp;
retVal = libusb_open (dev, &devHandle);
if (retVal != LIBUSB_SUCCESS)
return;
int r = libusb_get_device_descriptor(dev, &desc);
if (r < 0)
{
cout<<"failed to get device descriptor"<<endl;
return;
}
r = libusb_get_config_descriptor(dev, 0, &conDesc);
printf("Interface Class = %d\n", conDesc->interface->altsetting->bInterfaceClass);
cout<<"Number of possible configurations: "<<(int)desc.bNumConfigurations<<" ";
// cout<<"Device Class: "<<desc.bDeviceClass<<endl;
// cout<<"Device Class: "<<desc.bDeviceSubClass<<endl;
printf("Class = %d\n", desc.bDeviceClass);
cout<<"VendorID: "<<desc.idVendor<<endl;
cout<<"ProductID: "<<desc.idProduct<<endl;
dwProdId1 = (DWORD)desc.idProduct;
if (dwProdId1 == dwProdId)
{
printf("in if\n");
}
else
{
printf("in else\n");
}
retVal = libusb_get_string_descriptor_ascii(devHandle, desc.iManufacturer, strDesc, 256);
printf ("Manufacturer: %s\n", strDesc);
retVal = libusb_get_string_descriptor_ascii(devHandle, desc.iSerialNumber, strDesc, 256);
printf ("SerialNumber: %s\n", strDesc);
retVal = libusb_get_string_descriptor_ascii(devHandle, desc.iProduct, strDesc, 256);
printf ("Product: %s\n", strDesc);
printf("\n\n");
}
I am working on an Android project which uses VuDroid, which in turn uses MuPDF version 0.5.
VuDroid removed the original OpenJPEG support from MuPDF, so I have ported the OpenJPEG support from MuPDF version 1.5.
But I have encountered a new problem: the color information in the JPX image is gone. The desired effect:
my effect:
the ported load-jpx code:
#include "fitz.h"
#include "mupdf.h"
/* Without the definition of OPJ_STATIC, compilation fails on windows
* due to the use of __stdcall. We believe it is required on some
* linux toolchains too. */
#define OPJ_STATIC
#ifndef _MSC_VER
#define OPJ_HAVE_STDINT_H
#endif
#include <openjpeg.h>
static void fz_opj_error_callback(const char *msg, void *client_data)
{
//fz_context *ctx = (fz_context *)client_data;
//fz_warn(ctx, "openjpeg error: %s", msg);
}
static void fz_opj_warning_callback(const char *msg, void *client_data)
{
//fz_context *ctx = (fz_context *)client_data;
//fz_warn(ctx, "openjpeg warning: %s", msg);
}
static void fz_opj_info_callback(const char *msg, void *client_data)
{
/* fz_warn("openjpeg info: %s", msg); */
}
typedef struct stream_block_s
{
unsigned char *data;
int size;
int pos;
} stream_block;
static OPJ_SIZE_T fz_opj_stream_read(void * p_buffer, OPJ_SIZE_T p_nb_bytes, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
int len;
len = sb->size - sb->pos;
if (len < 0)
len = 0;
if (len == 0)
return (OPJ_SIZE_T)-1; /* End of file! */
if ((OPJ_SIZE_T)len > p_nb_bytes)
len = p_nb_bytes;
memcpy(p_buffer, sb->data + sb->pos, len);
sb->pos += len;
return len;
}
static OPJ_OFF_T fz_opj_stream_skip(OPJ_OFF_T skip, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
if (skip > sb->size - sb->pos)
skip = sb->size - sb->pos;
sb->pos += skip;
return sb->pos;
}
static OPJ_BOOL fz_opj_stream_seek(OPJ_OFF_T seek_pos, void * p_user_data)
{
stream_block *sb = (stream_block *)p_user_data;
if (seek_pos > sb->size)
return OPJ_FALSE;
sb->pos = seek_pos;
return OPJ_TRUE;
}
fz_error
fz_load_jpx(pdf_image* img, unsigned char *data, int size, fz_colorspace *defcs, int indexed)
{
//fz_pixmap *img;
opj_dparameters_t params;
opj_codec_t *codec;
opj_image_t *jpx;
opj_stream_t *stream;
fz_colorspace *colorspace;
unsigned char *p;
OPJ_CODEC_FORMAT format;
int a, n, w, h, depth, sgnd;
int x, y, k, v;
stream_block sb;
if (size < 2)
fz_throw("not enough data to determine image format");
/* Check for SOC marker -- if found we have a bare J2K stream */
if (data[0] == 0xFF && data[1] == 0x4F)
format = OPJ_CODEC_J2K;
else
format = OPJ_CODEC_JP2;
opj_set_default_decoder_parameters(&params);
if (indexed)
params.flags |= OPJ_DPARAMETERS_IGNORE_PCLR_CMAP_CDEF_FLAG;
codec = opj_create_decompress(format);
opj_set_info_handler(codec, fz_opj_info_callback, 0);
opj_set_warning_handler(codec, fz_opj_warning_callback, 0);
opj_set_error_handler(codec, fz_opj_error_callback, 0);
if (!opj_setup_decoder(codec, &params))
{
fz_throw("j2k decode failed");
}
stream = opj_stream_default_create(OPJ_TRUE);
sb.data = data;
sb.pos = 0;
sb.size = size;
opj_stream_set_read_function(stream, fz_opj_stream_read);
opj_stream_set_skip_function(stream, fz_opj_stream_skip);
opj_stream_set_seek_function(stream, fz_opj_stream_seek);
opj_stream_set_user_data(stream, &sb);
/* Set the length to avoid an assert */
opj_stream_set_user_data_length(stream, size);
if (!opj_read_header(stream, codec, &jpx))
{
opj_stream_destroy(stream);
opj_destroy_codec(codec);
fz_throw("Failed to read JPX header");
}
if (!opj_decode(codec, stream, jpx))
{
opj_stream_destroy(stream);
opj_destroy_codec(codec);
opj_image_destroy(jpx);
fz_throw("Failed to decode JPX image");
}
opj_stream_destroy(stream);
opj_destroy_codec(codec);
/* jpx should never be NULL here, but check anyway */
if (!jpx)
fz_throw("opj_decode failed");
pdf_logimage("opj_decode succeeded");
for (k = 1; k < (int)jpx->numcomps; k++)
{
if (!jpx->comps[k].data)
{
opj_image_destroy(jpx);
fz_throw("image components are missing data");
}
if (jpx->comps[k].w != jpx->comps[0].w)
{
opj_image_destroy(jpx);
fz_throw("image components have different width");
}
if (jpx->comps[k].h != jpx->comps[0].h)
{
opj_image_destroy(jpx);
fz_throw("image components have different height");
}
if (jpx->comps[k].prec != jpx->comps[0].prec)
{
opj_image_destroy(jpx);
fz_throw("image components have different precision");
}
}
n = jpx->numcomps;
w = jpx->comps[0].w;
h = jpx->comps[0].h;
depth = jpx->comps[0].prec;
sgnd = jpx->comps[0].sgnd;
if (jpx->color_space == OPJ_CLRSPC_SRGB && n == 4) { n = 3; a = 1; }
else if (jpx->color_space == OPJ_CLRSPC_SYCC && n == 4) { n = 3; a = 1; }
else if (n == 2) { n = 1; a = 1; }
else if (n > 4) { n = 4; a = 1; }
else { a = 0; }
if (defcs)
{
if (defcs->n == n)
{
colorspace = defcs;
}
else
{
fz_warn("jpx file and dict colorspaces do not match");
defcs = NULL;
}
}
if (!defcs)
{
switch (n)
{
case 1: colorspace = pdf_devicegray; break;
case 3: colorspace = pdf_devicergb; break;
case 4: colorspace = pdf_devicecmyk; break;
}
}
//error = fz_new_pixmap(&img, colorspace, w, h);
//if (error)
// return error;
pdf_logimage("colorspace handled\n");
int bpc = 1;
if (colorspace) {
bpc = 1 + colorspace->n;
};
pdf_logimage("w = %d, bpc = %d, h = %d\n", w, bpc, h);
img->samples = fz_newbuffer(w * bpc * h);
//opj_image_destroy(jpx);
//fz_throw("out of memory loading jpx");
p = (unsigned char *)img->samples->bp;
pdf_logimage("start to deal with samples");
for (y = 0; y < h; y++)
{
for (x = 0; x < w; x++)
{
for (k = 0; k < n + a; k++)
{
v = jpx->comps[k].data[y * w + x];
if (sgnd)
v = v + (1 << (depth - 1));
if (depth > 8)
v = v >> (depth - 8);
*p++ = v;
}
if (!a)
*p++ = 255;
}
}
img->samples->wp = p;
pdf_logimage("start to deal with samples succeeded");
opj_image_destroy(jpx);
// if (a)
// {
// if (n == 4)
// {
// fz_pixmap *tmp = fz_new_pixmap(ctx, fz_device_rgb(ctx), w, h);
// fz_convert_pixmap(ctx, tmp, img);
// fz_drop_pixmap(ctx, img);
// img = tmp;
// }
// fz_premultiply_pixmap(ctx, img);
// }
return fz_okay;
}
The render code:
JNIEXPORT jbyteArray JNICALL Java_org_vudroid_pdfdroid_codec_PdfPage_drawPage
(JNIEnv *env, jclass clazz, jlong dochandle, jlong pagehandle)
{
renderdocument_t *doc = (renderdocument_t*) dochandle;
renderpage_t *page = (renderpage_t*) pagehandle;
//DEBUG("PdfView(%p).drawpage(%p, %p)", this, doc, page);
fz_error error;
fz_matrix ctm;
fz_irect viewbox;
fz_pixmap *pixmap;
jfloat *matrix;
jint *viewboxarr;
jint *dimen;
jint *buffer;
int length, val;
pixmap = nil;
/* initialize parameter arrays for MuPDF */
ctm.a = 1;
ctm.b = 0;
ctm.c = 0;
ctm.d = 1;
ctm.e = 0;
ctm.f = 0;
// matrix = (*env)->GetPrimitiveArrayCritical(env, matrixarray, 0);
// ctm.a = matrix[0];
// ctm.b = matrix[1];
// ctm.c = matrix[2];
// ctm.d = matrix[3];
// ctm.e = matrix[4];
// ctm.f = matrix[5];
// (*env)->ReleasePrimitiveArrayCritical(env, matrixarray, matrix, 0);
// DEBUG("Matrix: %f %f %f %f %f %f",
// ctm.a, ctm.b, ctm.c, ctm.d, ctm.e, ctm.f);
// viewboxarr = (*env)->GetPrimitiveArrayCritical(env, viewboxarray, 0);
// viewbox.x0 = viewboxarr[0];
// viewbox.y0 = viewboxarr[1];
// viewbox.x1 = viewboxarr[2];
// viewbox.y1 = viewboxarr[3];
// (*env)->ReleasePrimitiveArrayCritical(env, viewboxarray, viewboxarr, 0);
// DEBUG("Viewbox: %d %d %d %d",
// viewbox.x0, viewbox.y0, viewbox.x1, viewbox.y1);
viewbox.x0 = 0;
viewbox.y0 = 0;
viewbox.x1 = 595;
viewbox.y1 = 841;
/* do the rendering */
DEBUG("doing the rendering...");
//buffer = (*env)->GetPrimitiveArrayCritical(env, bufferarray, 0);
// do the actual rendering:
error = fz_rendertree(&pixmap, doc->rast, page->page->tree,
ctm, viewbox, 1);
/* evil magic: we transform the rendered image's byte order
*/
int x, y;
if (bmpdata)
fz_free(bmpdata);
bmpstride = ((pixmap->w * 3 + 3) / 4) * 4;
bmpdata = fz_malloc(pixmap->h * bmpstride);
DEBUG("inside drawpage, bmpstride = %d, pixmap->w = %d, pixmap->h = %d\n", bmpstride, pixmap->w, pixmap->h);
if (!bmpdata)
return NULL;
for (y = 0; y < pixmap->h; y++)
{
unsigned char *p = bmpdata + y * bmpstride;
unsigned char *s = pixmap->samples + y * pixmap->w * 4;
for (x = 0; x < pixmap->w; x++)
{
p[x * 3 + 0] = s[x * 4 + 3];
p[x * 3 + 1] = s[x * 4 + 2];
p[x * 3 + 2] = s[x * 4 + 1];
}
}
FILE* fp = fopen("/sdcard/drawpage", "wb");
fwrite(bmpdata, pixmap->h * bmpstride, 1, fp);
fclose(fp);
jbyteArray array = (*env)->NewByteArray(env, pixmap->h * bmpstride);
(*env)->SetByteArrayRegion(env, array, 0, pixmap->h * bmpstride, bmpdata);
// if(!error) {
// DEBUG("Converting image buffer pixel order");
// length = pixmap->w * pixmap->h;
// unsigned int *col = pixmap->samples;
// int c = 0;
// for(val = 0; val < length; val++) {
// col[val] = ((col[val] & 0xFF000000) >> 24) |
// ((col[val] & 0x00FF0000) >> 8) |
// ((col[val] & 0x0000FF00) << 8);
// }
// winconvert(pixmap);
// }
// (*env)->ReleasePrimitiveArrayCritical(env, bufferarray, buffer, 0);
fz_free(pixmap);
if (error) {
DEBUG("error!");
throw_exception(env, "error rendering page");
}
DEBUG("PdfView.drawPage() done");
return array;
}
I have compared the JPX output samples to those from MuPDF 1.5 on Windows, and they are the same, but the colorspace of the original JPX is gone.
Could you help me get the colorspace back?
It seems you are trying to use an old version of MuPDF with some bits pulled in from a more recent version. To be honest, that's hardly likely to work. I would also guess that it's not the OpenJPEG library causing your problem, since the image appears, but converted to grayscale.
Have you tried opening the file in the current version of MuPDF? Does it work?
If so, then it seems to me your correct approach should be to use the current code, not try to bolt pieces onto an older version.
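For example, one quick way to check this (assuming a current MuPDF build with the mutool utility is installed, and using a hypothetical file name) is to render the page containing the JPX image and see whether the color survives:
mutool draw -o page1.png input.pdf 1
If the color is correct there, the OpenJPEG decoding is probably fine, and the problem is more likely in the older rendering pipeline the ported code is bolted onto.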
I just can't seem to get FFmpeg working when using the library. Every time I try to convert an asf file to wmv, I get the following issue at run time:
[wmv1 @ 0x81ee000]error, slice code was 2
[wmv1 @ 0x81ee000]header damaged
This is my code:
static void audio_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int out_size, len, in_size;
FILE *f, *outfile;
uint8_t *outbuf;
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
av_init_packet(&avpkt);
printf("Audio decoding\n");
/* find the mpeg audio decoder */
codec = avcodec_find_decoder(CODEC_ID_WMV1);
if (!codec) {
fprintf(stderr, "codec not found\n");
return;
}
c= avcodec_alloc_context2(CODEC_TYPE_AUDIO);
/* open it */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
return;
}
outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "could not open %s\n", filename);
return;
}
outfile = fopen(outfilename, "wb");
if (!outfile) {
av_free(c);
return;
}
/* decode until eof */
avpkt.data = inbuf;
len = avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
NSLog(#"%d", avpkt.size);
while (avpkt.size > 0) {
out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
len = avcodec_decode_audio2(c, (short *)outbuf, &out_size, inbuf,len);// avpkt.size);
NSLog(#"%d", len);
if (len < 0) {
fprintf(stderr, "Error while decoding\n");
fclose(outfile);
return;
}
if (out_size > 0) {
/* if a frame has been decoded, output it */
fwrite(outbuf, 1, out_size, outfile);
}
avpkt.size -= len;
avpkt.data += len;
if (avpkt.size < AUDIO_REFILL_THRESH) {
/* Refill the input buffer, to avoid trying to decode
* incomplete frames. Instead of this, one could also use
* a parser, or use a proper container format through
* libavformat. */
memmove(inbuf, avpkt.data, avpkt.size);
avpkt.data = inbuf;
len = fread(avpkt.data + avpkt.size, 1,
INBUF_SIZE - avpkt.size, f);
if (len > 0)
avpkt.size += len;
}
}
fclose(outfile);
fclose(f);
free(outbuf);
avcodec_close(c);
av_free(c);
}
I have tried the command line utilities and they successfully convert the file. Any help would be helpful. Thanks.
I made the mistake of opening the wrong file.