Related
I am running JMeter scripts from the command line. While running, I get this summary after every request. I understood from the documentation that we need to comment out the summariser.name=summary line or set it to none. I don't want to see this summary. Please let me know how to disable it.
00:44:10.785 summary + 6 in 00:00:32 = 0.2/s Avg: 241 Min: 2 Max: 1239 Err: 1 (16.67%) Active: 1 Started: 1 Finished: 0
00:44:10.785 summary = 498 in 00:39:27 = 0.2/s Avg: 126 Min: 0 Max: 2851 Err: 32 (6.43%)
00:44:42.892 summary + 7 in 00:00:31 = 0.2/s Avg: 88 Min: 0 Max: 418 Err: 0 (0.00%) Active: 1 Started: 1 Finished: 0
00:44:42.892 summary = 505 in 00:39:57 = 0.2/s Avg: 126 Min: 0 Max: 2851 Err: 32 (6.34%)
00:45:14.999 summary + 6 in 00:00:31 = 0.2/s Avg: 73 Min: 2 Max: 216 Err: 0 (0.00%) Active: 1 Started: 1 Finished: 0
00:45:14.999 summary = 511 in 00:40:28 = 0.2/s Avg: 125 Min: 0 Max: 2851 Err: 32 (6.26%)
00:45:41.565 summary + 6 in 00:00:31 = 0.2/s Avg: 68 Min: 2 Max: 205 Err: 0 (0.00%) Active: 1 Started: 1 Finished: 0
00:45:41.565 summary = 517 in 00:40:58 = 0.2/s Avg: 125 Min: 0 Max: 2851 Err: 32 (6.19%)
00:46:13.681 summary + 6 in 00:00:31 = 0.2/s Avg: 103 Min: 2 Max: 384 Err: 0 (0.00%) Active: 1 Started: 1 Finished: 0
00:46:13.681 summary = 523 in 00:41:29 = 0.2/s Avg: 124 Min: 0 Max: 2851 Err: 32 (6.12%)
If you don't want to see the summariser output in the console you can amend your command to
jmeter -Jsummariser.out=false -n -t test.jmx -l result.jtl
To make the change permanent, add this line: summariser.out=false to the user.properties file.
If you want to turn off the summariser completely:
Open jmeter.properties file with your favourite text editor
Locate this line
summariser.name=summary
and either comment it by putting # character in front of it:
#summariser.name=summary
or just simply delete it
That's it, you won't see summariser output on next execution
More information:
Summariser - Generate Summary Results - configuration
Configuring JMeter
Apache JMeter Properties Customization Guide
I am trying to use tensorflow_probability to construct a mcmc chain. This is my code:
chain_states, kernel_results = tfp.mcmc.sample_chain(
num_results=tf.constant(1e3, dtype=tf.int32),
num_burnin_steps=tf.constant(1e2, dtype=tf.int32),
parallel_iterations=tf.constant(10, dtype=tf.int32),
current_state=current_state,
kernel=tfp.mcmc.MetropolisHastings(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=joint_log_prob,
num_leapfrog_steps=tf.constant(2, dtype=tf.int32),
step_size=tf.Variable(1.),
step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy()
)))
But I got this error:
> InvalidArgumentError Traceback (most recent call last) <ipython-input-13-7e972cc65053> in <module>()
> ----> 1 make_model(well_complex, well_ligand, fi_complex, fi_ligand)
>
> ~/Documents/GitHub/assaytools2/assaytools2/assaytools2/inference.py in
> make_model(well_complex, well_ligand, fi_complex, fi_ligand)
> 162 num_leapfrog_steps=tf.constant(2, dtype=tf.int32),
> 163 step_size=tf.Variable(1.),
> --> 164 step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy()
> 165 )))
> 166
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/sample.py
> in sample_chain(num_results, current_state, previous_kernel_results,
> kernel, num_burnin_steps, num_steps_between_results,
> parallel_iterations, name)
> 238
> 239 if previous_kernel_results is None:
> --> 240 previous_kernel_results = kernel.bootstrap_results(current_state)
> 241 return tf.scan(
> 242 fn=_scan_body,
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/metropolis_hastings.py
> in bootstrap_results(self, init_state)
> 261 name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'),
> 262 values=[init_state]):
> --> 263 pkr = self.inner_kernel.bootstrap_results(init_state)
> 264 if not has_target_log_prob(pkr):
> 265 raise ValueError(
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/hmc.py
> in bootstrap_results(self, init_state)
> 506 def bootstrap_results(self, init_state):
> 507 """Creates initial `previous_kernel_results` using a supplied `state`."""
> --> 508 kernel_results = self._impl.bootstrap_results(init_state)
> 509 if self.step_size_update_fn is not None:
> 510 step_size_assign = self.step_size_update_fn(self.step_size, None) # pylint:
> disable=not-callable
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/metropolis_hastings.py
> in bootstrap_results(self, init_state)
> 261 name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'),
> 262 values=[init_state]):
> --> 263 pkr = self.inner_kernel.bootstrap_results(init_state)
> 264 if not has_target_log_prob(pkr):
> 265 raise ValueError(
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/hmc.py
> in bootstrap_results(self, init_state)
> 672 init_target_log_prob,
> 673 init_grads_target_log_prob,
> --> 674 ] = mcmc_util.maybe_call_fn_and_grads(self.target_log_prob_fn, init_state)
> 675 return UncalibratedHamiltonianMonteCarloKernelResults(
> 676 log_acceptance_correction=tf.zeros_like(init_target_log_prob),
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/util.py
> in maybe_call_fn_and_grads(fn, fn_arg_list, result, grads,
> check_non_none_grads, name)
> 232 fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
> 233 else [fn_arg_list])
> --> 234 result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
> 235 if not all(r.dtype.is_floating
> 236 for r in (result if is_list_like(result) else [result])): # pylint: disable=superfluous-parens
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow_probability/python/mcmc/util.py
> in _value_and_gradients(fn, fn_arg_list, result, grads, name)
> 207 ]
> 208 else:
> --> 209 grads = tfe.gradients_function(fn)(*fn_arg_list)
> 210 else:
> 211 if is_list_like(result) and len(result) == len(fn_arg_list):
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in decorated(*args, **kwds)
> 368 """Computes the gradient of the decorated function."""
> 369
> --> 370 _, grad = val_and_grad_function(f, params=params)(*args, **kwds)
> 371 return grad
> 372
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in decorated(*args, **kwds)
> 469 "receive keyword arguments.")
> 470 val, vjp = make_vjp(f, params)(*args, **kwds)
> --> 471 return val, vjp(dy=dy)
> 472
> 473 return decorated
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in vjp(dy)
> 539 return imperative_grad.imperative_grad(
> 540 _default_vspace, this_tape, nest.flatten(result), sources,
> --> 541 output_gradients=dy)
> 542 return result, vjp
> 543
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/imperative_grad.py
> in imperative_grad(vspace, tape, target, sources, output_gradients)
> 61 """
> 62 return pywrap_tensorflow.TFE_Py_TapeGradient(
> ---> 63 tape._tape, vspace, target, sources, output_gradients) # pylint: disable=protected-access
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/eager/backprop.py
> in _gradient_function(op_name, attr_tuple, num_inputs, inputs,
> outputs, out_grads)
> 115 return [None] * num_inputs
> 116
> --> 117 return grad_fn(mock_op, *out_grads)
> 118
> 119
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/math_grad.py
> in _ProdGrad(op, grad)
> 158 with ops.device("/cpu:0"):
> 159 rank = array_ops.rank(op.inputs[0])
> --> 160 reduction_indices = (reduction_indices + rank) % rank
> 161 reduced = math_ops.cast(reduction_indices, dtypes.int32)
> 162 idx = math_ops.range(0, rank)
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py
> in binary_op_wrapper(x, y)
> 860 with ops.name_scope(None, op_name, [x, y]) as name:
> 861 if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
> --> 862 return func(x, y, name=name)
> 863 elif not isinstance(y, sparse_tensor.SparseTensor):
> 864 try:
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py
> in add(x, y, name)
> 322 else:
> 323 message = e.message
> --> 324 _six.raise_from(_core._status_to_exception(e.code, message), None)
> 325
> 326
>
> ~/anaconda2/envs/py36/lib/python3.6/site-packages/six.py in
> raise_from(value, from_value)
>
> InvalidArgumentError: cannot compute Add as input #0(zero-based) was
> expected to be a int32 tensor but is a int64 tensor [Op:Add] name:
> mcmc_sample_chain/mh_bootstrap_results/mh_bootstrap_results/hmc_kernel_bootstrap_results/maybe_call_fn_and_grads/value_and_gradients/add/
I double-checked, and none of my initial tensors were of integer type.
I wonder where I went wrong.
Thanks!
I'm trying to run autossh (on a VM running CentOS6), but it's exiting immediately with the help message. I think this is a system issue because when I run it with the exact same parameters on another computer (running Ubuntu 14.04) it works fine. It's also fine when I run the same command but with ssh instead of autossh. So I tried strace to see if anything's wrong there. But if there is I'm not sure what it is. Any ideas?
Here's the autossh command: autossh -oStrictHostKeyChecking=no -oServerAliveInterval=15 -oServerAliveCountMax=4 -L 3130:localhost:3130 -N -i /path/to/some.pem user@remotehost
Here's the strace output (note myserver is an entry in .ssh/config that contains the same parameters as the previous command):
execve("/usr/local/bin/autossh", ["autossh", "myserver"], [/* 55 vars */]) = 0
brk(0) = 0xefc000
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f26193cc000
access("/etc/ld.so.preload", R_OK) = -1 ENOENT (No such file or directory)
open("/etc/ld.so.cache", O_RDONLY) = 3
fstat(3, {st_mode=S_IFREG|0644, st_size=36751, ...}) = 0
mmap(NULL, 36751, PROT_READ, MAP_PRIVATE, 3, 0) = 0x7f26193c3000
close(3) = 0
open("/lib64/libnsl.so.1", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0p#\0\0\0\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=113432, ...}) = 0
mmap(NULL, 2198192, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x7f2618f95000
mprotect(0x7f2618fab000, 2093056, PROT_NONE) = 0
mmap(0x7f26191aa000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x15000) = 0x7f26191aa000
mmap(0x7f26191ac000, 6832, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x7f26191ac000
close(3) = 0
open("/lib64/libc.so.6", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\3\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0p\356\1\0\0\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=1920936, ...}) = 0
mmap(NULL, 3750152, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x7f2618c01000
mprotect(0x7f2618d8b000, 2097152, PROT_NONE) = 0
mmap(0x7f2618f8b000, 20480, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x18a000) = 0x7f2618f8b000
mmap(0x7f2618f90000, 18696, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x7f2618f90000
close(3) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f26193c2000
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f26193c1000
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f26193c0000
arch_prctl(ARCH_SET_FS, 0x7f26193c1700) = 0
mprotect(0x7f2618f8b000, 16384, PROT_READ) = 0
mprotect(0x7f26191aa000, 4096, PROT_READ) = 0
mprotect(0x7f26193cd000, 4096, PROT_READ) = 0
munmap(0x7f26193c3000, 36751) = 0
write(2, "usage: autossh [-V] [-M monitor_"..., 69usage: autossh [-V] [-M monitor_port[:echo_port]] [-f] [SSH_OPTIONS]
) = 69
write(2, "\n", 1
) = 1
write(2, " -M specifies monitor port. M"..., 238 -M specifies monitor port. May be overridden by environment
variable AUTOSSH_PORT. 0 turns monitoring loop off.
Alternatively, a port for an echo service on the remote
machine may be specified. (Normally port 7.)
) = 238
write(2, " -f run in background (autoss"..., 85 -f run in background (autossh handles this, and does not
pass it to ssh.)
) = 85
write(2, " -V print autossh version and"..., 39 -V print autossh version and exit.
) = 39
write(2, "\n", 1
) = 1
write(2, "Environment variables are:\n", 27Environment variables are:
) = 27
write(2, " AUTOSSH_GATETIME - how lo"..., 259 AUTOSSH_GATETIME - how long must an ssh session be established
before we decide it really was established
(in seconds). Default is 30 seconds; use of -f
flag sets this to 0.
) = 259
write(2, " AUTOSSH_LOGFILE - file t"..., 107 AUTOSSH_LOGFILE - file to log to (default is to use the syslog
facility)
) = 107
write(2, " AUTOSSH_LOGLEVEL - level "..., 49 AUTOSSH_LOGLEVEL - level of log verbosity
) = 49
write(2, " AUTOSSH_MAXLIFETIME - set th"..., 65 AUTOSSH_MAXLIFETIME - set the maximum time to live (seconds)
) = 65
write(2, " AUTOSSH_MAXSTART - max ti"..., 69 AUTOSSH_MAXSTART - max times to restart (default is no limit)
) = 69
write(2, " AUTOSSH_MESSAGE - messag"..., 74 AUTOSSH_MESSAGE - message to append to echo string (max 64 bytes)
) = 74
write(2, " AUTOSSH_PATH - path t"..., 53 AUTOSSH_PATH - path to ssh if not default
) = 53
write(2, " AUTOSSH_PIDFILE - write "..., 49 AUTOSSH_PIDFILE - write pid to this file
) = 49
write(2, " AUTOSSH_POLL - how of"..., 70 AUTOSSH_POLL - how often to check the connection (seconds)
) = 70
write(2, " AUTOSSH_FIRST_POLL - time b"..., 71 AUTOSSH_FIRST_POLL - time before first connection check (seconds)
) = 71
write(2, " AUTOSSH_PORT - port t"..., 61 AUTOSSH_PORT - port to use for monitor connection
) = 61
write(2, " AUTOSSH_DEBUG - turn l"..., 104 AUTOSSH_DEBUG - turn logging to maximum verbosity and log to
stderr
) = 104
write(2, "\n", 1
) = 1
exit_group(1) = ?
+++ exited with 1 +++
I had exactly the same problem with autossh 1.4e on CentOS Linux 7. autossh stopped immediately and printed the help, without even trying to connect to SSH.
The solution was to specify -M 0 on the command line:
autossh -M 0 \
-oStrictHostKeyChecking=no \
-oServerAliveInterval=15 \
-oServerAliveCountMax=4 \
-L 3130:localhost:3130 \
-N -i /path/to/some.pem user@remotehost
Come on ... the autossh exits with
write(2, "usage: autossh [-V] [-M monitor_"..., 69usage: autossh [-V] [-M monitor_port[:echo_port]] [-f] [SSH_OPTIONS]
which means that you specified the wrong arguments, and it is trying to explain to you what the correct syntax looks like. To see what is wrong, there are several things you can try:
Use the -vvv switch with ssh to see more verbose logging from ssh.
Use AUTOSSH_DEBUG environment variable to get some debug logs from autossh.
When I run my code valgrind is giving this error.
Invalid free() / delete / delete[] / realloc()
==7363== at 0x4C2A82E: free (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==7363== by 0x4009F8: main (in /home/arihant/ELF/elf)
==7363== Address 0x51f3fa0 is 0 bytes inside a block of size 16 free'd
==7363== at 0x4C2A82E: free (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==7363== by 0x4009B1: main (in /home/arihant/ELF/elf)
==7363==
==7363==
==7363== HEAP SUMMARY:
==7363== in use at exit: 160 bytes in 4 blocks
==7363== total heap usage: 448 allocs, 448 frees, 34,684 bytes allocated
==7363==
==7363== LEAK SUMMARY:
==7363== definitely lost: 160 bytes in 4 blocks
==7363== indirectly lost: 0 bytes in 0 blocks
==7363== possibly lost: 0 bytes in 0 blocks
==7363== still reachable: 0 bytes in 0 blocks
==7363== suppressed: 0 bytes in 0 blocks
==7363== Rerun with --leak-check=full to see details of leaked memory
==7363==
==7363== For counts of detected and suppressed errors, rerun with: -v
==7363== ERROR SUMMARY: 6 errors from 2 contexts (suppressed: 2 from 2)
Why am I getting 160 bytes lost even though the numbers of allocs and frees are equal?
My code is:
18 fp = fopen("output", "r");
19 obj_elf = (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr));
20 fread(obj_elf, 1, sizeof(Elf32_Ehdr), fp);
21
22 if (argc < 2) {
23 print_menu();
24 free(obj_elf);
25 fclose(fp);
26 return 0;
27 }
28
29 if (argv[1][0] == '-') {
30 switch (argv[1][1]) {
31 case 'e':
32 elf_header(obj_elf); /*elf header function call*/
33 break;
34 case 's':
35 read_section_header(fp, obj_elf, obj_sect_hdr);
36 print_section_header(fp, obj_sect_hdr, obj_elf);
37
38 for (i = 0; i < obj_elf->e_shnum; i++) {
39 free(obj_sect_hdr[i]);
40 free(sec_name[i]);
41 }
42 break;
43 case 'S':
44 read_section_header(fp, obj_elf, obj_sect_hdr);
45 read_symbol_table(fp, obj_elf, obj_sect_hdr, obj_sym);
46 symbol_table(fp, obj_elf, obj_sect_hdr, obj_sym);
47
48 for (i = 0; i < obj_elf->e_shnum; i++) {
49 free(obj_sect_hdr[i]);
50 free(sec_name[i]);
51 }
52 for (i = 0; i < n_entries_sym_t; i++) {
53 free(obj_sym[i]);
54 free(symbol_name[i]);
55 }
56
In the main function there are many calls to free(); how will I know which free is invalid?
Your answer is at the top of your output:
Invalid free() / delete / delete[] / realloc()
==7363== at 0x4C2A82E: free (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==7363== by 0x4009F8: main (in /home/arihant/ELF/elf)
==7363== Address 0x51f3fa0 is 0 bytes inside a block of size 16 free'd
==7363== at 0x4C2A82E: free (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==7363== by 0x4009B1: main (in /home/arihant/ELF/elf)
Just matching up the number of allocations and frees is not sufficient when one or more of your calls to free is invalid. You will need to build with debugging to see which free() is the problem. It looks like it's probably something allocated in read_section_header() or something else not listed.
I have created a package using the encoding utf-8.
When calling a function, it returns a DataFrame, with a column coded in utf-8.
When using IPython at the command line, I don't have any problems showing the content of this table. When using the Notebook, it crashes with the error 'utf8' codec can't decode byte 0xe7. I've attached a full traceback below.
What is the proper encoding to work with Notebook?
UnicodeDecodeError Traceback (most recent call last)
<ipython-input-13-92c0011919e7> in <module>()
3 ver = verif.VerificacaoNA()
4 comp, total = ver.executarCompRealFisica(DT_INI, DT_FIN)
----> 5 comp
c:\Python27-32\lib\site-packages\ipython-0.13.1-py2.7.egg\IPython\core\displayhook.pyc in __call__(self, result)
240 self.update_user_ns(result)
241 self.log_output(format_dict)
--> 242 self.finish_displayhook()
243
244 def flush(self):
c:\Python27-32\lib\site-packages\ipython-0.13.1-py2.7.egg\IPython\zmq\displayhook.pyc in finish_displayhook(self)
59 sys.stdout.flush()
60 sys.stderr.flush()
---> 61 self.session.send(self.pub_socket, self.msg, ident=self.topic)
62 self.msg = None
63
c:\Python27-32\lib\site-packages\ipython-0.13.1-py2.7.egg\IPython\zmq\session.pyc in send(self, stream, msg_or_type, content, parent, ident, buffers, subheader, track, header)
557
558 buffers = [] if buffers is None else buffers
--> 559 to_send = self.serialize(msg, ident)
560 flag = 0
561 if buffers:
c:\Python27-32\lib\site-packages\ipython-0.13.1-py2.7.egg\IPython\zmq\session.pyc in serialize(self, msg, ident)
461 content = self.none
462 elif isinstance(content, dict):
--> 463 content = self.pack(content)
464 elif isinstance(content, bytes):
465 # content is already packed, as in a relayed message
c:\Python27-32\lib\site-packages\ipython-0.13.1-py2.7.egg\IPython\zmq\session.pyc in <lambda>(obj)
76
77 # ISO8601-ify datetime objects
---> 78 json_packer = lambda obj: jsonapi.dumps(obj, default=date_default)
79 json_unpacker = lambda s: extract_dates(jsonapi.loads(s))
80
c:\Python27-32\lib\site-packages\pyzmq-13.0.0-py2.7-win32.egg\zmq\utils\jsonapi.pyc in dumps(o, **kwargs)
70 kwargs['separators'] = (',', ':')
71
---> 72 return _squash_unicode(jsonmod.dumps(o, **kwargs))
73
74 def loads(s, **kwargs):
c:\Python27-32\lib\json\__init__.pyc in dumps(obj, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, encoding, default, **kw)
236 check_circular=check_circular, allow_nan=allow_nan, indent=indent,
237 separators=separators, encoding=encoding, default=default,
--> 238 **kw).encode(obj)
239
240
c:\Python27-32\lib\json\encoder.pyc in encode(self, o)
199 # exceptions aren't as detailed. The list call should be roughly
200 # equivalent to the PySequence_Fast that ''.join() would do.
--> 201 chunks = self.iterencode(o, _one_shot=True)
202 if not isinstance(chunks, (list, tuple)):
203 chunks = list(chunks)
c:\Python27-32\lib\json\encoder.pyc in iterencode(self, o, _one_shot)
262 self.key_separator, self.item_separator, self.sort_keys,
263 self.skipkeys, _one_shot)
--> 264 return _iterencode(o, 0)
265
266 def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
UnicodeDecodeError: 'utf8' codec can't decode byte 0xe7 in position 199: invalid continuation byte
I had the same problem recently, and indeed setting the default encoding to UTF-8 did the trick:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
Running sys.getdefaultencoding() yielded 'ascii' on my environment (Python 2.7.3), so I guess that's the default.
Also see this related question and Ian Bicking's blog post on the subject.