From 328d4026b16815807e43a26f60988da3de8c05a4 Mon Sep 17 00:00:00 2001 From: Fabio Zadrozny Date: Fri, 11 Mar 2022 10:02:17 -0300 Subject: [PATCH] Drop support for Python 2.7. --- .../_pydev_bundle/_pydev_calltip_util.py | 13 +- .../pydevd/_pydev_bundle/_pydev_completer.py | 30 +- .../_pydev_bundle/_pydev_imports_tipper.py | 22 +- .../_pydev_bundle/_pydev_jy_imports_tipper.py | 169 +- .../pydevd/_pydev_bundle/_pydev_log.py | 22 +- .../_pydev_bundle/_pydev_tipper_common.py | 21 +- .../_pydev_bundle/pydev_console_utils.py | 20 +- .../pydevd/_pydev_bundle/pydev_imports.py | 49 +- .../_pydev_bundle/pydev_ipython_console.py | 13 +- .../pydev_ipython_console_011.py | 5 +- .../pydevd/_pydev_bundle/pydev_localhost.py | 13 +- .../pydevd/_pydev_bundle/pydev_monkey.py | 8 +- .../_pydev_imps/_pydev_BaseHTTPServer.py | 604 --- .../_pydev_imps/_pydev_SimpleXMLRPCServer.py | 601 --- .../pydevd/_pydev_imps/_pydev_SocketServer.py | 715 --- .../pydevd/_pydev_imps/_pydev_inspect.py | 788 ---- .../pydevd/_pydev_imps/_pydev_pkgutil_old.py | 591 --- .../_pydev_imps/_pydev_saved_modules.py | 38 +- .../pydevd/_pydev_imps/_pydev_sys_patch.py | 17 +- .../pydevd/_pydev_imps/_pydev_xmlrpclib.py | 1493 ------- .../_pydev_runfiles/pydev_runfiles_nose.py | 10 +- .../_pydev_runfiles/pydev_runfiles_xml_rpc.py | 62 +- .../pydevd/_pydevd_bundle/pydevd_api.py | 22 +- .../_pydevd_bundle/pydevd_breakpoints.py | 6 +- .../_pydevd_bundle/pydevd_bytecode_utils.py | 4 - .../_pydevd_bundle/pydevd_code_to_source.py | 155 +- .../pydevd_collect_bytecode_info.py | 19 +- .../pydevd/_pydevd_bundle/pydevd_comm.py | 69 +- .../pydevd/_pydevd_bundle/pydevd_constants.py | 138 +- .../pydevd/_pydevd_bundle/pydevd_cython.c | 197 +- .../pydevd/_pydevd_bundle/pydevd_cython.pyx | 6 +- .../_pydevd_bundle/pydevd_dont_trace_files.py | 22 +- .../pydevd/_pydevd_bundle/pydevd_exec.py | 5 - .../pydevd/_pydevd_bundle/pydevd_frame.py | 6 +- .../pydevd/_pydevd_bundle/pydevd_io.py | 30 +- .../_pydevd_bundle/pydevd_net_command.py | 22 +- .../pydevd_net_command_factory_json.py | 10 +- .../pydevd_net_command_factory_xml.py | 6 +- .../pydevd_process_net_command.py | 11 +- .../pydevd_process_net_command_json.py | 5 +- .../pydevd/_pydevd_bundle/pydevd_resolver.py | 37 +- .../pydevd/_pydevd_bundle/pydevd_safe_repr.py | 52 +- .../pydevd/_pydevd_bundle/pydevd_signature.py | 4 +- .../_pydevd_bundle/pydevd_source_mapping.py | 6 +- .../pydevd/_pydevd_bundle/pydevd_stackless.py | 5 +- .../_pydevd_bundle/pydevd_suspended_frames.py | 13 +- .../_pydevd_bundle/pydevd_traceproperty.py | 18 +- .../pydevd/_pydevd_bundle/pydevd_utils.py | 34 +- .../pydevd/_pydevd_bundle/pydevd_vars.py | 35 +- .../pydevd/_pydevd_bundle/pydevd_xml.py | 46 +- .../pydevd/build_tools/build_binaries_osx.py | 2 - .../build_tools/build_binaries_windows.py | 17 +- .../pydevd/build_tools/generate_code.py | 15 +- .../build_tools/pydevd_release_process.txt | 76 +- .../pydevd/build_tools/rename_pep8.py | 16 +- .../_vendored/pydevd/interpreterInfo.py | 23 +- .../_vendored/pydevd/pycompletionserver.py | 63 +- src/debugpy/_vendored/pydevd/pydevconsole.py | 17 +- src/debugpy/_vendored/pydevd/pydevd.py | 37 +- .../pydevd_concurrency_logger.py | 11 +- .../_vendored/pydevd/pydevd_file_utils.py | 40 +- .../pydevd/pydevd_plugins/django_debug.py | 12 +- .../pydevd/pydevd_plugins/jinja2_debug.py | 73 +- .../pydevd_plugins/pydevd_line_validation.py | 3 +- .../_vendored/pydevd/pydevd_tracing.py | 26 +- src/debugpy/_vendored/pydevd/runfiles.py | 14 +- .../_vendored/pydevd/stubs/_get_tips.py | 283 -- 
.../_vendored/pydevd/stubs/pycompletion.py | 39 - .../pydevd/tests_python/debug_constants.py | 8 - .../pydevd/tests_python/debugger_fixtures.py | 10 +- .../pydevd/tests_python/debugger_unittest.py | 28 +- .../test_code_obj_to_source_code.py | 4 - .../test_collect_bytecode_info.py | 21 +- .../pydevd/tests_python/test_console.py | 6 +- .../tests_python/test_convert_utilities.py | 11 +- .../pydevd/tests_python/test_debugger.py | 85 +- .../pydevd/tests_python/test_debugger_json.py | 132 +- .../pydevd/tests_python/test_dump_threads.py | 5 +- .../pydevd/tests_python/test_extract_token.py | 7 +- .../pydevd/tests_python/test_fixtures.py | 3 +- .../pydevd/tests_python/test_pydev_monkey.py | 10 +- .../pydevd/tests_python/test_resolvers.py | 31 +- .../_vendored/pydevd/tests_python/test_run.py | 8 +- .../pydevd/tests_python/test_safe_repr.py | 105 +- .../tests_python/test_tracing_gotchas.py | 1 - .../pydevd/tests_python/test_utilities.py | 50 +- .../pydevd/tests_runfiles/test_runfiles.py | 53 +- .../pydevd/third_party/cython_json.py | 320 -- .../isort_container/isort/__init__.py | 28 - .../isort_container/isort/__main__.py | 3 - .../isort_container/isort/hooks.py | 82 - .../isort_container/isort/isort.py | 969 ----- .../third_party/isort_container/isort/main.py | 296 -- .../isort_container/isort/natural.py | 47 - .../isort_container/isort/pie_slice.py | 594 --- .../isort_container/isort/pylama_isort.py | 29 - .../isort_container/isort/settings.py | 256 -- .../pydevd/third_party/pep8/autopep8.py | 3827 ----------------- .../pep8/lib2to3/lib2to3/.gitignore | 1 - .../pep8/lib2to3/lib2to3/Grammar.txt | 158 - .../pep8/lib2to3/lib2to3/PatternGrammar.txt | 28 - .../pep8/lib2to3/lib2to3/__init__.py | 1 - .../pep8/lib2to3/lib2to3/__main__.py | 4 - .../pep8/lib2to3/lib2to3/btm_matcher.py | 168 - .../pep8/lib2to3/lib2to3/btm_utils.py | 283 -- .../pep8/lib2to3/lib2to3/fixer_base.py | 189 - .../pep8/lib2to3/lib2to3/fixer_util.py | 432 -- .../pep8/lib2to3/lib2to3/fixes/__init__.py | 1 - .../pep8/lib2to3/lib2to3/fixes/fix_apply.py | 59 - .../lib2to3/lib2to3/fixes/fix_basestring.py | 14 - .../pep8/lib2to3/lib2to3/fixes/fix_buffer.py | 22 - .../lib2to3/lib2to3/fixes/fix_callable.py | 37 - .../pep8/lib2to3/lib2to3/fixes/fix_dict.py | 107 - .../pep8/lib2to3/lib2to3/fixes/fix_except.py | 93 - .../pep8/lib2to3/lib2to3/fixes/fix_exec.py | 40 - .../lib2to3/lib2to3/fixes/fix_execfile.py | 52 - .../lib2to3/lib2to3/fixes/fix_exitfunc.py | 72 - .../pep8/lib2to3/lib2to3/fixes/fix_filter.py | 76 - .../lib2to3/lib2to3/fixes/fix_funcattrs.py | 21 - .../pep8/lib2to3/lib2to3/fixes/fix_future.py | 22 - .../pep8/lib2to3/lib2to3/fixes/fix_getcwdu.py | 19 - .../pep8/lib2to3/lib2to3/fixes/fix_has_key.py | 110 - .../pep8/lib2to3/lib2to3/fixes/fix_idioms.py | 152 - .../pep8/lib2to3/lib2to3/fixes/fix_import.py | 99 - .../pep8/lib2to3/lib2to3/fixes/fix_imports.py | 145 - .../lib2to3/lib2to3/fixes/fix_imports2.py | 16 - .../pep8/lib2to3/lib2to3/fixes/fix_input.py | 26 - .../pep8/lib2to3/lib2to3/fixes/fix_intern.py | 46 - .../lib2to3/lib2to3/fixes/fix_isinstance.py | 52 - .../lib2to3/lib2to3/fixes/fix_itertools.py | 43 - .../lib2to3/fixes/fix_itertools_imports.py | 57 - .../pep8/lib2to3/lib2to3/fixes/fix_long.py | 19 - .../pep8/lib2to3/lib2to3/fixes/fix_map.py | 91 - .../lib2to3/lib2to3/fixes/fix_metaclass.py | 228 - .../lib2to3/lib2to3/fixes/fix_methodattrs.py | 24 - .../pep8/lib2to3/lib2to3/fixes/fix_ne.py | 23 - .../pep8/lib2to3/lib2to3/fixes/fix_next.py | 103 - .../pep8/lib2to3/lib2to3/fixes/fix_nonzero.py | 21 - 
.../lib2to3/lib2to3/fixes/fix_numliterals.py | 28 - .../lib2to3/lib2to3/fixes/fix_operator.py | 96 - .../pep8/lib2to3/lib2to3/fixes/fix_paren.py | 44 - .../pep8/lib2to3/lib2to3/fixes/fix_print.py | 87 - .../pep8/lib2to3/lib2to3/fixes/fix_raise.py | 90 - .../lib2to3/lib2to3/fixes/fix_raw_input.py | 17 - .../pep8/lib2to3/lib2to3/fixes/fix_reduce.py | 35 - .../pep8/lib2to3/lib2to3/fixes/fix_renames.py | 70 - .../pep8/lib2to3/lib2to3/fixes/fix_repr.py | 23 - .../lib2to3/lib2to3/fixes/fix_set_literal.py | 53 - .../lib2to3/fixes/fix_standarderror.py | 18 - .../pep8/lib2to3/lib2to3/fixes/fix_sys_exc.py | 30 - .../pep8/lib2to3/lib2to3/fixes/fix_throw.py | 56 - .../lib2to3/lib2to3/fixes/fix_tuple_params.py | 175 - .../pep8/lib2to3/lib2to3/fixes/fix_types.py | 62 - .../pep8/lib2to3/lib2to3/fixes/fix_unicode.py | 42 - .../pep8/lib2to3/lib2to3/fixes/fix_urllib.py | 197 - .../lib2to3/lib2to3/fixes/fix_ws_comma.py | 39 - .../pep8/lib2to3/lib2to3/fixes/fix_xrange.py | 73 - .../lib2to3/lib2to3/fixes/fix_xreadlines.py | 25 - .../pep8/lib2to3/lib2to3/fixes/fix_zip.py | 35 - .../third_party/pep8/lib2to3/lib2to3/main.py | 269 -- .../pep8/lib2to3/lib2to3/patcomp.py | 205 - .../pep8/lib2to3/lib2to3/pgen2/__init__.py | 4 - .../pep8/lib2to3/lib2to3/pgen2/conv.py | 257 -- .../pep8/lib2to3/lib2to3/pgen2/driver.py | 157 - .../pep8/lib2to3/lib2to3/pgen2/grammar.py | 184 - .../pep8/lib2to3/lib2to3/pgen2/literals.py | 60 - .../pep8/lib2to3/lib2to3/pgen2/parse.py | 201 - .../pep8/lib2to3/lib2to3/pgen2/pgen.py | 386 -- .../pep8/lib2to3/lib2to3/pgen2/token.py | 82 - .../pep8/lib2to3/lib2to3/pgen2/tokenize.py | 499 --- .../pep8/lib2to3/lib2to3/pygram.py | 40 - .../pep8/lib2to3/lib2to3/pytree.py | 887 ---- .../pep8/lib2to3/lib2to3/refactor.py | 747 ---- .../pydevd/third_party/pep8/pycodestyle.py | 2325 ---------- .../pydevd/third_party/tests_cython_json.py | 101 - .../wrapped_for_pydev/ctypes/__init__.py | 518 --- .../wrapped_for_pydev/ctypes/_ctypes.dll | Bin 287417 -> 0 bytes .../wrapped_for_pydev/ctypes/_endian.py | 58 - .../ctypes/ctypes-README.txt | 134 - .../ctypes/macholib/.cvsignore | 1 - .../ctypes/macholib/__init__.py | 9 - .../wrapped_for_pydev/ctypes/macholib/dyld.py | 167 - .../ctypes/macholib/dylib.py | 63 - .../ctypes/macholib/framework.py | 65 - .../wrapped_for_pydev/ctypes/util.py | 124 - .../wrapped_for_pydev/ctypes/wintypes.py | 98 - .../not_in_default_pythonpath.txt | 1 - 187 files changed, 760 insertions(+), 25414 deletions(-) delete mode 100644 src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_BaseHTTPServer.py delete mode 100644 src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SimpleXMLRPCServer.py delete mode 100644 src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SocketServer.py delete mode 100644 src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_inspect.py delete mode 100644 src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_pkgutil_old.py delete mode 100644 src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_xmlrpclib.py delete mode 100644 src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_exec.py delete mode 100644 src/debugpy/_vendored/pydevd/stubs/_get_tips.py delete mode 100644 src/debugpy/_vendored/pydevd/stubs/pycompletion.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/cython_json.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__main__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/hooks.py delete mode 100644 
src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/main.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/natural.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pie_slice.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pylama_isort.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/isort_container/isort/settings.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/.gitignore delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/Grammar.txt delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/PatternGrammar.txt delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__init__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__main__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_matcher.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_utils.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_base.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_util.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/__init__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_apply.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_basestring.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_buffer.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_callable.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_dict.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_except.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exec.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_execfile.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exitfunc.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_filter.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_funcattrs.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_future.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_getcwdu.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_has_key.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_idioms.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_import.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports2.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_input.py delete mode 100644 
src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_isinstance.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools_imports.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_long.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_map.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_metaclass.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_methodattrs.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ne.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_next.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_nonzero.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_numliterals.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_operator.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_paren.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_print.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raise.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raw_input.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_reduce.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_renames.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_repr.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_set_literal.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_standarderror.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_sys_exc.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_throw.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_tuple_params.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_types.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_unicode.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_urllib.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ws_comma.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xrange.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xreadlines.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_zip.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/main.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/patcomp.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/__init__.py delete mode 100644 
src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/conv.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/driver.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/grammar.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/literals.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/parse.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/token.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pygram.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pytree.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/refactor.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/pep8/pycodestyle.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/tests_cython_json.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/__init__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_ctypes.dll delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_endian.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/ctypes-README.txt delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/.cvsignore delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/__init__.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dyld.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/wintypes.py delete mode 100644 src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/not_in_default_pythonpath.txt diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_calltip_util.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_calltip_util.py index b846fb4e..aca108fa 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_calltip_util.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_calltip_util.py @@ -2,20 +2,16 @@ License: Apache 2.0 Author: Yuli Fitterman ''' -# noinspection PyBroadException import types -from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY3K +from _pydevd_bundle.pydevd_constants import IS_JYTHON try: import inspect except: - try: - from _pydev_imps import _pydev_inspect as inspect - except: - import traceback; + import traceback; - traceback.print_exc() # Ok, no inspect available (search will not work)from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY3K + traceback.print_exc() # Ok, no inspect available (search will not work) from _pydev_bundle._pydev_imports_tipper import signature_from_docstring @@ -57,7 +53,7 @@ def get_description(obj): fn_name = None fn_class = None if isinstance(fob, (types.FunctionType, types.MethodType)): - spec_info = 
inspect.getfullargspec(fob) if IS_PY3K else inspect.getargspec(fob) + spec_info = inspect.getfullargspec(fob) argspec = inspect.formatargspec(*spec_info) fn_name = getattr(fob, '__name__', None) if isinstance(obj, type) or type(obj).__name__ == 'classobj': @@ -141,6 +137,7 @@ def create_class_stub(class_name, contents): def create_function_stub(fn_name, fn_argspec, fn_docstring, indent=0): + def shift_right(string, prefix): return ''.join(prefix + line for line in string.splitlines(True)) diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_completer.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_completer.py index 1765d512..ed0db4ea 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_completer.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_completer.py @@ -2,13 +2,9 @@ from collections import namedtuple from string import ascii_letters, digits from _pydevd_bundle import pydevd_xml -from _pydevd_bundle.pydevd_constants import IS_PY2 import pydevconsole -if IS_PY2: - import __builtin__ -else: - import builtins as __builtin__ # Py3 +import builtins as __builtin__ # Py3 try: import java.lang # @UnusedImport @@ -192,8 +188,6 @@ def completions_to_xml(completions): msg = [""] for comp in completions: - if IS_PY2: - comp = [(x.encode('utf-8') if x.__class__ == unicode else x) for x in comp] msg.append(' + paramClassName = repr(paramTypesClass) # should be something like paramClassName = paramClassName.split('\'')[1] except: - paramClassName = repr(paramTypesClass) #just in case something else happens... it will at least be visible - #if the parameter equals [C, it means it it a char array, so, let's change it + paramClassName = repr(paramTypesClass) # just in case something else happens... it will at least be visible + # if the parameter equals [C, it means it it a char array, so, let's change it a = format_param_class_name(paramClassName) - #a = a.replace('[]','Array') - #a = a.replace('Object', 'obj') - #a = a.replace('String', 's') - #a = a.replace('Integer', 'i') - #a = a.replace('Char', 'c') - #a = a.replace('Double', 'd') - args.append(a) #so we don't leave invalid code - + # a = a.replace('[]','Array') + # a = a.replace('Object', 'obj') + # a = a.replace('String', 's') + # a = a.replace('Integer', 'i') + # a = a.replace('Char', 'c') + # a = a.replace('Double', 'd') + args.append(a) # so we don't leave invalid code info = Info(name, args=args, ret=ret) - #print_ info.basic_as_str() + # print_ info.basic_as_str() infos.append(info) return 1, infos except Exception: - s = StringIO.StringIO() + s = StringIO() traceback.print_exc(file=s) return 1, [Info(str('ERROR'), doc=s.getvalue())] return 0, None + def ismodule(mod): - #java modules... do we have other way to know that? + # java modules... do we have other way to know that? 
if not hasattr(mod, 'getClass') and not hasattr(mod, '__class__') \ and hasattr(mod, '__name__'): return 1 @@ -312,20 +307,20 @@ def dir_obj(obj): if hasattr(obj, '__class__'): if obj.__class__ == java.lang.Class: - #get info about superclasses + # get info about superclasses classes = [] classes.append(obj) try: c = obj.getSuperclass() except TypeError: - #may happen on jython when getting the java.lang.Class class + # may happen on jython when getting the java.lang.Class class c = obj.getSuperclass(obj) while c != None: classes.append(c) c = c.getSuperclass() - #get info about interfaces + # get info about interfaces interfs = [] for obj in classes: try: @@ -334,7 +329,7 @@ def dir_obj(obj): interfs.extend(obj.getInterfaces(obj)) classes.extend(interfs) - #now is the time when we actually get info on the declared methods and fields + # now is the time when we actually get info on the declared methods and fields for obj in classes: try: declaredMethods = obj.getDeclaredMethods() @@ -356,17 +351,15 @@ def dir_obj(obj): ret.append(name) found.put(name, 1) - elif isclass(obj.__class__): d = dir(obj.__class__) for name in d: ret.append(name) found.put(name, 1) - - #this simple dir does not always get all the info, that's why we have the part before - #(e.g.: if we do a dir on String, some methods that are from other interfaces such as - #charAt don't appear) + # this simple dir does not always get all the info, that's why we have the part before + # (e.g.: if we do a dir on String, some methods that are from other interfaces such as + # charAt don't appear) d = dir(original) for name in d: if found.get(name) != 1: @@ -393,7 +386,6 @@ def format_arg(arg): return s - def search_definition(data): '''@return file, line, col ''' @@ -437,36 +429,36 @@ def generate_imports_tip_for_module(obj_to_complete, dir_comps=None, getattr=get try: obj = getattr(obj_to_complete, d) except (AttributeError, java.lang.NoClassDefFoundError): - #jython has a bug in its custom classloader that prevents some things from working correctly, so, let's see if - #we can fix that... (maybe fixing it in jython itself would be a better idea, as this is clearly a bug) - #for that we need a custom classloader... we have references from it in the below places: + # jython has a bug in its custom classloader that prevents some things from working correctly, so, let's see if + # we can fix that... (maybe fixing it in jython itself would be a better idea, as this is clearly a bug) + # for that we need a custom classloader... we have references from it in the below places: # - #http://mindprod.com/jgloss/classloader.html - #http://www.javaworld.com/javaworld/jw-03-2000/jw-03-classload-p2.html - #http://freshmeat.net/articles/view/1643/ + # http://mindprod.com/jgloss/classloader.html + # http://www.javaworld.com/javaworld/jw-03-2000/jw-03-classload-p2.html + # http://freshmeat.net/articles/view/1643/ # - #note: this only happens when we add things to the sys.path at runtime, if they are added to the classpath - #before the run, everything goes fine. + # note: this only happens when we add things to the sys.path at runtime, if they are added to the classpath + # before the run, everything goes fine. # - #The code below ilustrates what I mean... + # The code below ilustrates what I mean... 
# - #import sys - #sys.path.insert(1, r"C:\bin\eclipse310\plugins\org.junit_3.8.1\junit.jar" ) + # import sys + # sys.path.insert(1, r"C:\bin\eclipse310\plugins\org.junit_3.8.1\junit.jar" ) # - #import junit.framework - #print_ dir(junit.framework) #shows the TestCase class here + # import junit.framework + # print_ dir(junit.framework) #shows the TestCase class here # - #import junit.framework.TestCase + # import junit.framework.TestCase # - #raises the error: - #Traceback (innermost last): + # raises the error: + # Traceback (innermost last): # File "", line 1, in ? - #ImportError: No module named TestCase + # ImportError: No module named TestCase # - #whereas if we had added the jar to the classpath before, everything would be fine by now... + # whereas if we had added the jar to the classpath before, everything would be fine by now... ret.append((d, '', '', retType)) - #that's ok, private things cannot be gotten... + # that's ok, private things cannot be gotten... continue else: @@ -494,10 +486,9 @@ def generate_imports_tip_for_module(obj_to_complete, dir_comps=None, getattr=get elif ismodule(obj): retType = TYPE_IMPORT - #add token and doc to return - assure only strings. + # add token and doc to return - assure only strings. ret.append((d, doc, args, retType)) - return ret diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_log.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_log.py index 853348b2..7328d621 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_log.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_log.py @@ -1,28 +1,24 @@ import traceback import sys -try: - import StringIO -except: - import io as StringIO #Python 3.0 - - +from io import StringIO + + class Log: - + def __init__(self): self._contents = [] - + def add_content(self, *content): self._contents.append(' '.join(content)) - + def add_exception(self): - s = StringIO.StringIO() + s = StringIO() exc_info = sys.exc_info() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], limit=None, file=s) self._contents.append(s.getvalue()) - def get_contents(self): return '\n'.join(self._contents) - + def clear_log(self): - del self._contents[:] \ No newline at end of file + del self._contents[:] diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_tipper_common.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_tipper_common.py index 79ce4988..ba2799ae 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_tipper_common.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_tipper_common.py @@ -1,22 +1,9 @@ -try: - import inspect -except: - try: - from _pydev_imps import _pydev_inspect as inspect - except: - import traceback;traceback.print_exc() #Ok, no inspect available (search will not work) - -try: - import re -except: - try: - import sre as re # for older versions - except: - import traceback;traceback.print_exc() #Ok, no inspect available (search will not work) - +import inspect +import re from _pydevd_bundle.pydevd_constants import xrange + def do_find(f, mod): import linecache if inspect.ismodule(mod): @@ -40,7 +27,7 @@ def do_find(f, mod): try: mod = mod.func_code except AttributeError: - mod = mod.__code__ #python 3k + mod = mod.__code__ # python 3k if inspect.istraceback(mod): mod = mod.tb_frame diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_console_utils.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_console_utils.py index 72689101..4fe06f38 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_console_utils.py 
+++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_console_utils.py
@@ -5,19 +5,13 @@
 from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec
 from _pydev_bundle._pydev_calltip_util import get_description
 from _pydevd_bundle import pydevd_vars
 from _pydevd_bundle import pydevd_xml
-from _pydevd_bundle.pydevd_constants import (IS_JYTHON, dict_iter_items, NEXT_VALUE_SEPARATOR, get_global_debugger,
+from _pydevd_bundle.pydevd_constants import (IS_JYTHON, NEXT_VALUE_SEPARATOR, get_global_debugger,
     silence_warnings_decorator)
 from contextlib import contextmanager
 from _pydev_bundle import pydev_log
 from _pydevd_bundle.pydevd_utils import interrupt_main_thread
-try:
-    import cStringIO as StringIO  # may not always be available @UnusedImport
-except:
-    try:
-        import StringIO  # @Reimport
-    except:
-        import io as StringIO
+from io import StringIO
 
 
 # =======================================================================================================================
@@ -440,7 +434,7 @@ class BaseInterpreterInterface:
         return True
 
     def getFrame(self):
-        xml = StringIO.StringIO()
+        xml = StringIO()
         hidden_ns = self.get_ipython_hidden_vars_dict()
         xml.write("<xml>")
         xml.write(pydevd_xml.frame_vars_to_xml(self.get_namespace(), hidden_ns))
@@ -450,13 +444,13 @@ class BaseInterpreterInterface:
 
     @silence_warnings_decorator
     def getVariable(self, attributes):
-        xml = StringIO.StringIO()
+        xml = StringIO()
         xml.write("<xml>")
         val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes)
         if val_dict is None:
             val_dict = {}
 
-        for k, val in dict_iter_items(val_dict):
+        for k, val in val_dict.items():
             val = val_dict[k]
             evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
             xml.write(pydevd_vars.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
@@ -471,7 +465,7 @@
         return pydevd_vars.table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format)
 
     def evaluate(self, expression):
-        xml = StringIO.StringIO()
+        xml = StringIO()
         xml.write("<xml>")
         result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
         xml.write(pydevd_vars.var_to_xml(result, expression))
@@ -536,7 +530,7 @@
         debugger_options = {}
         env_key = "PYDEVD_EXTRA_ENVS"
         if env_key in debugger_options:
-            for (env_name, value) in dict_iter_items(debugger_options[env_key]):
+            for (env_name, value) in debugger_options[env_key].items():
                 existing_value = os.environ.get(env_name, None)
                 if existing_value:
                     os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value)
diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_imports.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_imports.py
index 9fb17b94..fb912ead 100644
--- a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_imports.py
+++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_imports.py
@@ -1,47 +1,26 @@
 from _pydevd_bundle.pydevd_constants import USE_LIB_COPY, izip
 
 try:
-    try:
-        if USE_LIB_COPY:
-            from _pydev_imps._pydev_saved_modules import xmlrpclib
-        else:
-            import xmlrpclib
-    except ImportError:
-        import xmlrpc.client as xmlrpclib
+    if USE_LIB_COPY:
+        from _pydev_imps._pydev_saved_modules import xmlrpclib
+    else:
+        import xmlrpclib
 except ImportError:
-    from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
+    import xmlrpc.client as xmlrpclib
 
-try:
-    try:
-        if USE_LIB_COPY:
-            from _pydev_imps._pydev_saved_modules import _pydev_SimpleXMLRPCServer
-            from _pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
-        else:
-            from
SimpleXMLRPCServer import SimpleXMLRPCServer - except ImportError: - from xmlrpc.server import SimpleXMLRPCServer -except ImportError: - from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer +if USE_LIB_COPY: + from _pydev_imps._pydev_saved_modules import xmlrpcserver + SimpleXMLRPCServer = xmlrpcserver.SimpleXMLRPCServer +else: + from xmlrpc.server import SimpleXMLRPCServer -try: - from StringIO import StringIO -except ImportError: - from io import StringIO +from io import StringIO -try: - execfile = execfile # Not in Py3k -except NameError: - from _pydev_imps._pydev_execfile import execfile +from _pydev_imps._pydev_execfile import execfile from _pydev_imps._pydev_saved_modules import _queue -try: - from _pydevd_bundle.pydevd_exec import Exec -except: - from _pydevd_bundle.pydevd_exec2 import Exec +from _pydevd_bundle.pydevd_exec2 import Exec -try: - from urllib import quote, quote_plus, unquote_plus -except: - from urllib.parse import quote, quote_plus, unquote_plus # @UnresolvedImport +from urllib.parse import quote, quote_plus, unquote_plus # @UnresolvedImport diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py index 180f2895..a1221f97 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py @@ -1,14 +1,13 @@ import sys from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface -import os import traceback # Uncomment to force PyDev standard shell. # raise ImportError() from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend -from _pydevd_bundle.pydevd_constants import dict_iter_items + #======================================================================================================================= # InterpreterInterface @@ -48,11 +47,9 @@ class InterpreterInterface(BaseInterpreterInterface): return res - def get_namespace(self): return self.interpreter.get_namespace() - def getCompletions(self, text, act_tok): return self.interpreter.getCompletions(text, act_tok) @@ -61,8 +58,8 @@ class InterpreterInterface(BaseInterpreterInterface): def notify_about_magic(self): if not self.notification_succeeded: - self.notification_tries+=1 - if self.notification_tries>self.notification_max_tries: + self.notification_tries += 1 + if self.notification_tries > self.notification_max_tries: return completions = self.getCompletions("%", "%") magic_commands = [x[0] for x in completions] @@ -73,7 +70,7 @@ class InterpreterInterface(BaseInterpreterInterface): try: server.NotifyAboutMagic(magic_commands, self.interpreter.is_automagic()) self.notification_succeeded = True - except : + except: self.notification_succeeded = False def get_ipython_hidden_vars_dict(self): @@ -85,7 +82,7 @@ class InterpreterInterface(BaseInterpreterInterface): user_hidden_dict = user_ns_hidden.copy() else: # In IPython 1.x `user_ns_hidden` used to be a set with names of hidden variables - user_hidden_dict = dict([(key, val) for key, val in dict_iter_items(self.interpreter.ipython.user_ns) + user_hidden_dict = dict([(key, val) for key, val in self.interpreter.ipython.user_ns.items() if key in user_ns_hidden]) # while `_`, `__` and `___` were not initialized, they are not presented in `user_ns_hidden` diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console_011.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console_011.py index e8aaccc3..f60a2d25 100644 --- 
a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console_011.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console_011.py @@ -34,7 +34,6 @@ except ImportError: from IPython.core import release from _pydev_bundle.pydev_imports import xmlrpclib -from _pydevd_bundle.pydevd_constants import dict_keys default_pydev_banner_parts = default_banner_parts @@ -364,9 +363,9 @@ class _PyDevFrontEnd: def update(self, globals, locals): ns = self.ipython.user_ns - for key in dict_keys(self.ipython.user_ns): + for key, value in list(ns.items()): if key not in locals: - locals[key] = ns[key] + locals[key] = value self.ipython.user_global_ns.clear() self.ipython.user_global_ns.update(globals) diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_localhost.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_localhost.py index 27f6f5b7..db134d8c 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_localhost.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_localhost.py @@ -1,10 +1,11 @@ -from _pydevd_bundle import pydevd_constants from _pydev_imps._pydev_saved_modules import socket import sys IS_JYTHON = sys.platform.find('java') != -1 _cache = None + + def get_localhost(): ''' Should return 127.0.0.1 in ipv4 and ::1 in ipv6 @@ -48,17 +49,19 @@ def get_socket_names(n_sockets, close=False): sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((get_localhost(), 0)) socket_name = sock.getsockname() - + sockets.append(sock) socket_names.append(socket_name) - + if close: for s in sockets: s.close() return socket_names - + + def get_socket_name(close=False): return get_socket_names(1, close)[0] + if __name__ == '__main__': - print(get_socket_name()) \ No newline at end of file + print(get_socket_name()) diff --git a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_monkey.py b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_monkey.py index 637d73d9..11b3747c 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_monkey.py +++ b/src/debugpy/_vendored/pydevd/_pydev_bundle/pydev_monkey.py @@ -4,7 +4,7 @@ import re import sys from _pydev_imps._pydev_saved_modules import threading from _pydevd_bundle.pydevd_constants import get_global_debugger, IS_WINDOWS, IS_JYTHON, get_current_thread_id, \ - sorted_dict_repr, IS_PY2 + sorted_dict_repr from _pydev_bundle import pydev_log from contextlib import contextmanager from _pydevd_bundle import pydevd_constants @@ -278,9 +278,6 @@ def remove_quotes_from_args(args): for x in args: if Path is not None and isinstance(x, Path): x = str(x) - elif IS_PY2: - if not isinstance(x, (str, unicode)): - raise InvalidTypeInArgsException(str(type(x))) else: if not isinstance(x, (bytes, str)): raise InvalidTypeInArgsException(str(type(x))) @@ -298,9 +295,6 @@ def remove_quotes_from_args(args): for x in args: if Path is not None and isinstance(x, Path): x = x.as_posix() - elif IS_PY2: - if not isinstance(x, (str, unicode)): - raise InvalidTypeInArgsException(str(type(x))) else: if not isinstance(x, (bytes, str)): raise InvalidTypeInArgsException(str(type(x))) diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_BaseHTTPServer.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_BaseHTTPServer.py deleted file mode 100644 index f8dd9111..00000000 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_BaseHTTPServer.py +++ /dev/null @@ -1,604 +0,0 @@ -"""HTTP server base class. 
-
-Note: the class in this module doesn't implement any HTTP request; see
-SimpleHTTPServer for simple implementations of GET, HEAD and POST
-(including CGI scripts).  It does, however, optionally implement HTTP/1.1
-persistent connections, as of version 0.3.
-
-Contents:
-
-- BaseHTTPRequestHandler: HTTP request handler base class
-- test: test function
-
-XXX To do:
-
-- log requests even later (to capture byte count)
-- log user-agent header and other interesting goodies
-- send error log to separate file
-"""
-
-
-# See also:
-#
-# HTTP Working Group                                        T. Berners-Lee
-# INTERNET-DRAFT                                            R. T. Fielding
-#                                                           H. Frystyk Nielsen
-# Expires September 8, 1995                                 March 8, 1995
-#
-# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
-#
-# and
-#
-# Network Working Group                                     R. Fielding
-# Request for Comments: 2616                                et al
-# Obsoletes: 2068                                           June 1999
-# Category: Standards Track
-#
-# URL: http://www.faqs.org/rfcs/rfc2616.html
-
-# Log files
-# ---------
-#
-# Here's a quote from the NCSA httpd docs about log file format.
-#
-# | The logfile format is as follows. Each line consists of:
-# |
-# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
-# |
-# |        host: Either the DNS name or the IP number of the remote client
-# |        rfc931: Any information returned by identd for this person,
-# |                - otherwise.
-# |        authuser: If user sent a userid for authentication, the user name,
-# |                  - otherwise.
-# |        DD: Day
-# |        Mon: Month (calendar name)
-# |        YYYY: Year
-# |        hh: hour (24-hour format, the machine's timezone)
-# |        mm: minutes
-# |        ss: seconds
-# |        request: The first line of the HTTP request as sent by the client.
-# |        ddd: the status code returned by the server, - if not available.
-# |        bbbb: the total number of bytes sent,
-# |              *not including the HTTP/1.0 header*, - if not available
-# |
-# | You can determine the name of the file accessed through request.
-#
-# (Actually, the latter is only true if you know the server configuration
-# at the time the request was made!)
-
-__version__ = "0.3"
-
-__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
-
-import sys
-from _pydev_imps._pydev_saved_modules import time
-from _pydev_imps._pydev_saved_modules import socket
-from warnings import filterwarnings, catch_warnings
-with catch_warnings():
-    if sys.py3kwarning:
-        filterwarnings("ignore", ".*mimetools has been removed",
-                       DeprecationWarning)
-    import mimetools
-
-from _pydev_imps import _pydev_SocketServer as SocketServer
-
-# Default error message template
-DEFAULT_ERROR_MESSAGE = """\
-<head>
-<title>Error response</title>
-</head>
-<body>
-<h1>Error response</h1>
-<p>Error code %(code)d.
-<p>Message: %(message)s.
-<p>Error code explanation: %(code)s = %(explain)s.
-</body>
-"""
-
-DEFAULT_ERROR_CONTENT_TYPE = "text/html"
-
-def _quote_html(html):
-    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
-
-class HTTPServer(SocketServer.TCPServer):
-
-    allow_reuse_address = 1    # Seems to make sense in testing environment
-
-    def server_bind(self):
-        """Override server_bind to store the server name."""
-        SocketServer.TCPServer.server_bind(self)
-        host, port = self.socket.getsockname()[:2]
-        self.server_name = socket.getfqdn(host)
-        self.server_port = port
-
-
-class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
-
-    """HTTP request handler base class.
-
-    The following explanation of HTTP serves to guide you through the
-    code as well as to expose any misunderstandings I may have about
-    HTTP (so you don't need to read the code to figure out I'm wrong
-    :-).
-
-    HTTP (HyperText Transfer Protocol) is an extensible protocol on
-    top of a reliable stream transport (e.g. TCP/IP).  The protocol
-    recognizes three parts to a request:
-
-    1. One line identifying the request type and path
-    2. An optional set of RFC-822-style headers
-    3. An optional data part
-
-    The headers and data are separated by a blank line.
-
-    The first line of the request has the form
-
-    <command> <path> <version>
-
-    where <command> is a (case-sensitive) keyword such as GET or POST,
-    <path> is a string containing path information for the request,
-    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
-    <path> is encoded using the URL encoding scheme (using %xx to signify
-    the ASCII character with hex code xx).
-
-    The specification specifies that lines are separated by CRLF but
-    for compatibility with the widest range of clients recommends
-    servers also handle LF.  Similarly, whitespace in the request line
-    is treated sensibly (allowing multiple spaces between components
-    and allowing trailing whitespace).
-
-    Similarly, for output, lines ought to be separated by CRLF pairs
-    but most clients grok LF characters just fine.
-
-    If the first line of the request has the form
-
-    <command> <path>
-
-    (i.e. <version> is left out) then this is assumed to be an HTTP
-    0.9 request; this form has no optional headers and data part and
-    the reply consists of just the data.
-
-    The reply form of the HTTP 1.x protocol again has three parts:
-
-    1. One line giving the response code
-    2. An optional set of RFC-822-style headers
-    3. The data
-
-    Again, the headers and data are separated by a blank line.
-
-    The response code line has the form
-
-    <version> <responsecode> <responsestring>
-
-    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
-    <responsecode> is a 3-digit response code indicating success or
-    failure of the request, and <responsestring> is an optional
-    human-readable string explaining what the response code means.
-
-    This server parses the request and the headers, and then calls a
-    function specific to the request type (<command>).  Specifically,
-    a request SPAM will be handled by a method do_SPAM().  If no
-    such method exists the server sends an error response to the
-    client.  If it exists, it is called with no arguments:
-
-    do_SPAM()
-
-    Note that the request name is case sensitive (i.e. SPAM and spam
-    are different requests).
-
-    The various request details are stored in instance variables:
-
-    - client_address is the client IP address in the form (host,
-    port);
-
-    - command, path and version are the broken-down request line;
-
-    - headers is an instance of mimetools.Message (or a derived
-    class) containing the header information;
-
-    - rfile is a file object open for reading positioned at the
-    start of the optional input data part;
-
-    - wfile is a file object open for writing.
-
-    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
-
-    The first thing to be written must be the response line.  Then
-    follow 0 or more header lines, then a blank line, and then the
-    actual data (if any).  The meaning of the header lines depends on
-    the command executed by the server; in most cases, when data is
-    returned, there should be at least one header line of the form
-
-    Content-type: <type>/<subtype>
-
-    where <type> and <subtype> should be registered MIME types,
-    e.g. "text/html" or "text/plain".
-
-    """
-
-    # The Python system version, truncated to its first component.
-    sys_version = "Python/" + sys.version.split()[0]
-
-    # The server software version.  You may want to override this.
-    # The format is multiple whitespace-separated strings,
-    # where each string is of the form name[/version].
-    server_version = "BaseHTTP/" + __version__
-
-    # The default request version.  This only affects responses up until
-    # the point where the request line is parsed, so it mainly decides what
-    # the client gets back when sending a malformed request line.
-    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
-    default_request_version = "HTTP/0.9"
-
-    def parse_request(self):
-        """Parse a request (internal).
-
-        The request should be stored in self.raw_requestline; the results
-        are in self.command, self.path, self.request_version and
-        self.headers.
-
-        Return True for success, False for failure; on failure, an
-        error is sent back.
-
-        """
-        self.command = None  # set in case of error on the first line
-        self.request_version = version = self.default_request_version
-        self.close_connection = 1
-        requestline = self.raw_requestline
-        requestline = requestline.rstrip('\r\n')
-        self.requestline = requestline
-        words = requestline.split()
-        if len(words) == 3:
-            command, path, version = words
-            if version[:5] != 'HTTP/':
-                self.send_error(400, "Bad request version (%r)" % version)
-                return False
-            try:
-                base_version_number = version.split('/', 1)[1]
-                version_number = base_version_number.split(".")
-                # RFC 2145 section 3.1 says there can be only one "." and
-                #   - major and minor numbers MUST be treated as
-                #     separate integers;
-                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
-                #     turn is lower than HTTP/12.3;
-                #   - Leading zeros MUST be ignored by recipients.
- if len(version_number) != 2: - raise ValueError - version_number = int(version_number[0]), int(version_number[1]) - except (ValueError, IndexError): - self.send_error(400, "Bad request version (%r)" % version) - return False - if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": - self.close_connection = 0 - if version_number >= (2, 0): - self.send_error(505, - "Invalid HTTP Version (%s)" % base_version_number) - return False - elif len(words) == 2: - command, path = words - self.close_connection = 1 - if command != 'GET': - self.send_error(400, - "Bad HTTP/0.9 request type (%r)" % command) - return False - elif not words: - return False - else: - self.send_error(400, "Bad request syntax (%r)" % requestline) - return False - self.command, self.path, self.request_version = command, path, version - - # Examine the headers and look for a Connection directive - self.headers = self.MessageClass(self.rfile, 0) - - conntype = self.headers.get('Connection', "") - if conntype.lower() == 'close': - self.close_connection = 1 - elif (conntype.lower() == 'keep-alive' and - self.protocol_version >= "HTTP/1.1"): - self.close_connection = 0 - return True - - def handle_one_request(self): - """Handle a single HTTP request. - - You normally don't need to override this method; see the class - __doc__ string for information on how to handle specific HTTP - commands such as GET and POST. - - """ - try: - self.raw_requestline = self.rfile.readline(65537) - if len(self.raw_requestline) > 65536: - self.requestline = '' - self.request_version = '' - self.command = '' - self.send_error(414) - return - if not self.raw_requestline: - self.close_connection = 1 - return - if not self.parse_request(): - # An error code has been sent, just exit - return - mname = 'do_' + self.command - if not hasattr(self, mname): - self.send_error(501, "Unsupported method (%r)" % self.command) - return - method = getattr(self, mname) - method() - self.wfile.flush() #actually send the response if not already done. - except socket.timeout: - #a read or a write timed out. Discard this connection - self.log_error("Request timed out: %r", sys.exc_info()[1]) - self.close_connection = 1 - return - - def handle(self): - """Handle multiple requests if necessary.""" - self.close_connection = 1 - - self.handle_one_request() - while not self.close_connection: - self.handle_one_request() - - def send_error(self, code, message=None): - """Send and log an error reply. - - Arguments are the error code, and a detailed message. - The detailed message defaults to the short entry matching the - response code. - - This sends an error response (so it must be called before any - output has been generated), logs the error, and finally sends - a piece of HTML explaining the error to the user. - - """ - - try: - short, long = self.responses[code] - except KeyError: - short, long = '???', '???' 
- if message is None: - message = short - explain = long - self.log_error("code %d, message %s", code, message) - # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) - content = (self.error_message_format % - {'code': code, 'message': _quote_html(message), 'explain': explain}) - self.send_response(code, message) - self.send_header("Content-Type", self.error_content_type) - self.send_header('Connection', 'close') - self.end_headers() - if self.command != 'HEAD' and code >= 200 and code not in (204, 304): - self.wfile.write(content) - - error_message_format = DEFAULT_ERROR_MESSAGE - error_content_type = DEFAULT_ERROR_CONTENT_TYPE - - def send_response(self, code, message=None): - """Send the response header and log the response code. - - Also send two standard headers with the server software - version and the current date. - - """ - self.log_request(code) - if message is None: - if code in self.responses: - message = self.responses[code][0] - else: - message = '' - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s %d %s\r\n" % - (self.protocol_version, code, message)) - # print (self.protocol_version, code, message) - self.send_header('Server', self.version_string()) - self.send_header('Date', self.date_time_string()) - - def send_header(self, keyword, value): - """Send a MIME header.""" - if self.request_version != 'HTTP/0.9': - self.wfile.write("%s: %s\r\n" % (keyword, value)) - - if keyword.lower() == 'connection': - if value.lower() == 'close': - self.close_connection = 1 - elif value.lower() == 'keep-alive': - self.close_connection = 0 - - def end_headers(self): - """Send the blank line ending the MIME headers.""" - if self.request_version != 'HTTP/0.9': - self.wfile.write("\r\n") - - def log_request(self, code='-', size='-'): - """Log an accepted request. - - This is called by send_response(). - - """ - - self.log_message('"%s" %s %s', - self.requestline, str(code), str(size)) - - def log_error(self, format, *args): - """Log an error. - - This is called when a request cannot be fulfilled. By - default it passes the message on to log_message(). - - Arguments are the same as for log_message(). - - XXX This should go to the separate error log. - - """ - - self.log_message(format, *args) - - def log_message(self, format, *args): - """Log an arbitrary message. - - This is used by all other logging functions. Override - it if you have specific logging wishes. - - The first argument, FORMAT, is a format string for the - message to be logged. If the format string contains - any % escapes requiring parameters, they should be - specified as subsequent arguments (it's just like - printf!). - - The client host and current date/time are prefixed to - every message. 
- - """ - - sys.stderr.write("%s - - [%s] %s\n" % - (self.address_string(), - self.log_date_time_string(), - format%args)) - - def version_string(self): - """Return the server software version string.""" - return self.server_version + ' ' + self.sys_version - - def date_time_string(self, timestamp=None): - """Return the current date and time formatted for a message header.""" - if timestamp is None: - timestamp = time.time() - year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) - s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( - self.weekdayname[wd], - day, self.monthname[month], year, - hh, mm, ss) - return s - - def log_date_time_string(self): - """Return the current time formatted for logging.""" - now = time.time() - year, month, day, hh, mm, ss, x, y, z = time.localtime(now) - s = "%02d/%3s/%04d %02d:%02d:%02d" % ( - day, self.monthname[month], year, hh, mm, ss) - return s - - weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - - monthname = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - - def address_string(self): - """Return the client address formatted for logging. - - This version looks up the full hostname using gethostbyaddr(), - and tries to find a name that contains at least one dot. - - """ - - host, port = self.client_address[:2] - return socket.getfqdn(host) - - # Essentially static class variables - - # The version of the HTTP protocol we support. - # Set this to HTTP/1.1 to enable automatic keepalive - protocol_version = "HTTP/1.0" - - # The Message-like class used to parse headers - MessageClass = mimetools.Message - - # Table mapping response codes to messages; entries have the - # form {code: (shortmessage, longmessage)}. - # See RFC 2616. - responses = { - 100: ('Continue', 'Request received, please continue'), - 101: ('Switching Protocols', - 'Switching to new protocol; obey Upgrade header'), - - 200: ('OK', 'Request fulfilled, document follows'), - 201: ('Created', 'Document created, URL follows'), - 202: ('Accepted', - 'Request accepted, processing continues off-line'), - 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), - 204: ('No Content', 'Request fulfilled, nothing follows'), - 205: ('Reset Content', 'Clear input form for further input.'), - 206: ('Partial Content', 'Partial content follows.'), - - 300: ('Multiple Choices', - 'Object has several resources -- see URI list'), - 301: ('Moved Permanently', 'Object moved permanently -- see URI list'), - 302: ('Found', 'Object moved temporarily -- see URI list'), - 303: ('See Other', 'Object moved -- see Method and URL list'), - 304: ('Not Modified', - 'Document has not changed since given time'), - 305: ('Use Proxy', - 'You must use proxy specified in Location to access this ' - 'resource.'), - 307: ('Temporary Redirect', - 'Object moved temporarily -- see URI list'), - - 400: ('Bad Request', - 'Bad request syntax or unsupported method'), - 401: ('Unauthorized', - 'No permission -- see authorization schemes'), - 402: ('Payment Required', - 'No payment -- see charging schemes'), - 403: ('Forbidden', - 'Request forbidden -- authorization will not help'), - 404: ('Not Found', 'Nothing matches the given URI'), - 405: ('Method Not Allowed', - 'Specified method is invalid for this resource.'), - 406: ('Not Acceptable', 'URI not available in preferred format.'), - 407: ('Proxy Authentication Required', 'You must authenticate with ' - 'this proxy before proceeding.'), - 408: ('Request Timeout', 'Request timed out; try again 
later.'), - 409: ('Conflict', 'Request conflict.'), - 410: ('Gone', - 'URI no longer exists and has been permanently removed.'), - 411: ('Length Required', 'Client must specify Content-Length.'), - 412: ('Precondition Failed', 'Precondition in headers is false.'), - 413: ('Request Entity Too Large', 'Entity is too large.'), - 414: ('Request-URI Too Long', 'URI is too long.'), - 415: ('Unsupported Media Type', 'Entity body in unsupported format.'), - 416: ('Requested Range Not Satisfiable', - 'Cannot satisfy request range.'), - 417: ('Expectation Failed', - 'Expect condition could not be satisfied.'), - - 500: ('Internal Server Error', 'Server got itself in trouble'), - 501: ('Not Implemented', - 'Server does not support this operation'), - 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'), - 503: ('Service Unavailable', - 'The server cannot process the request due to a high load'), - 504: ('Gateway Timeout', - 'The gateway server did not receive a timely response'), - 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'), - } - - -def test(HandlerClass = BaseHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """Test the HTTP request handler class. - - This runs an HTTP server on port 8000 (or the first command line - argument). - - """ - - if sys.argv[1:]: - port = int(sys.argv[1]) - else: - port = 8000 - server_address = ('', port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print ("Serving HTTP on", sa[0], "port", sa[1], "...") - httpd.serve_forever() - - -if __name__ == '__main__': - test() diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SimpleXMLRPCServer.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SimpleXMLRPCServer.py deleted file mode 100644 index c5f77426..00000000 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SimpleXMLRPCServer.py +++ /dev/null @@ -1,601 +0,0 @@ -#Just a copy of the version in python 2.5 to be used if it's not available in jython 2.1 - -"""Simple XML-RPC Server. - -This module can be used to create simple XML-RPC servers -by creating a server and either installing functions, a -class instance, or by extending the SimpleXMLRPCServer -class. - -It can also be used to handle XML-RPC requests in a CGI -environment using CGIXMLRPCRequestHandler. - -A list of possible usage patterns follows: - -1. Install functions: - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_function(pow) -server.register_function(lambda x,y: x+y, 'add') -server.serve_forever() - -2. Install an instance: - -class MyFuncs: - def __init__(self): - # make all of the string functions available through - # string.func_name - import string - self.string = string - def _listMethods(self): - # implement this method so that system.listMethods - # knows to advertise the strings methods - return list_public_methods(self) + \ - ['string.' + method for method in list_public_methods(self.string)] - def pow(self, x, y): return pow(x, y) - def add(self, x, y) : return x + y - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(MyFuncs()) -server.serve_forever() - -3. 
Install an instance with custom dispatch method: - -class Math: - def _listMethods(self): - # this method must be present for system.listMethods - # to work - return ['add', 'pow'] - def _methodHelp(self, method): - # this method must be present for system.methodHelp - # to work - if method == 'add': - return "add(2,3) => 5" - elif method == 'pow': - return "pow(x, y[, z]) => number" - else: - # By convention, return empty - # string if no help is available - return "" - def _dispatch(self, method, params): - if method == 'pow': - return pow(*params) - elif method == 'add': - return params[0] + params[1] - else: - raise 'bad method' - -server = SimpleXMLRPCServer(("localhost", 8000)) -server.register_introspection_functions() -server.register_instance(Math()) -server.serve_forever() - -4. Subclass SimpleXMLRPCServer: - -class MathServer(SimpleXMLRPCServer): - def _dispatch(self, method, params): - try: - # We are forcing the 'export_' prefix on methods that are - # callable through XML-RPC to prevent potential security - # problems - func = getattr(self, 'export_' + method) - except AttributeError: - raise Exception('method "%s" is not supported' % method) - else: - return func(*params) - - def export_add(self, x, y): - return x + y - -server = MathServer(("localhost", 8000)) -server.serve_forever() - -5. CGI script: - -server = CGIXMLRPCRequestHandler() -server.register_function(pow) -server.handle_request() -""" - -# Written by Brian Quinlan (brian@sweetapp.com). -# Based on code written by Fredrik Lundh. - -from _pydev_imps import _pydev_xmlrpclib as xmlrpclib -from _pydev_imps._pydev_xmlrpclib import Fault -from _pydev_imps import _pydev_SocketServer as SocketServer -from _pydev_imps import _pydev_BaseHTTPServer as BaseHTTPServer -import sys -import os -try: - import fcntl -except ImportError: - fcntl = None - -def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): - """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d - - Resolves a dotted attribute name to an object. Raises - an AttributeError if any attribute in the chain starts with a '_'. - - If the optional allow_dotted_names argument is false, dots are not - supported and this function operates similar to getattr(obj, attr). - """ - - if allow_dotted_names: - attrs = attr.split('.') - else: - attrs = [attr] - - for i in attrs: - if i.startswith('_'): - raise AttributeError( - 'attempt to access private attribute "%s"' % i - ) - else: - obj = getattr(obj, i) - return obj - -def list_public_methods(obj): - """Returns a list of attribute strings, found in the specified - object, which represent callable attributes""" - - return [member for member in dir(obj) - if not member.startswith('_') and - callable(getattr(obj, member))] - -def remove_duplicates(lst): - """remove_duplicates([2,2,2,1,3,3]) => [3,1,2] - - Returns a copy of a list without duplicates. Every list - item must be hashable and the order of the items in the - resulting list is not defined. - """ - u = {} - for x in lst: - u[x] = 1 - - return u.keys() - -class SimpleXMLRPCDispatcher: - """Mix-in class that dispatches XML-RPC requests. - - This class is used to register XML-RPC method handlers - and then to dispatch them. There should never be any - reason to instantiate this class directly. 
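All five usage patterns sketched in the docstring above work unchanged against the Python 3 stdlib xmlrpc.server module, which supersedes this vendored copy now that Python 2 support is dropped. A minimal sketch of pattern 1, with an illustrative port:

from xmlrpc.server import SimpleXMLRPCServer

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x, y: x + y, 'add')
server.register_introspection_functions()  # enables system.listMethods and friends
server.serve_forever()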
- """ - - def __init__(self, allow_none, encoding): - self.funcs = {} - self.instance = None - self.allow_none = allow_none - self.encoding = encoding - - def register_instance(self, instance, allow_dotted_names=False): - """Registers an instance to respond to XML-RPC requests. - - Only one instance can be installed at a time. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. Methods beginning with an '_' - are considered private and will not be called by - SimpleXMLRPCServer. - - If a registered function matches a XML-RPC request, then it - will be called instead of the registered instance. - - If the optional allow_dotted_names argument is true and the - instance does not have a _dispatch method, method names - containing dots are supported and resolved, as long as none of - the name segments start with an '_'. - - *** SECURITY WARNING: *** - - Enabling the allow_dotted_names options allows intruders - to access your module's global variables and may allow - intruders to execute arbitrary code on your machine. Only - use this option on a secure, closed network. - - """ - - self.instance = instance - self.allow_dotted_names = allow_dotted_names - - def register_function(self, function, name=None): - """Registers a function to respond to XML-RPC requests. - - The optional name argument can be used to set a Unicode name - for the function. - """ - - if name is None: - name = function.__name__ - self.funcs[name] = function - - def register_introspection_functions(self): - """Registers the XML-RPC introspection methods in the system - namespace. - - see http://xmlrpc.usefulinc.com/doc/reserved.html - """ - - self.funcs.update({'system.listMethods' : self.system_listMethods, - 'system.methodSignature' : self.system_methodSignature, - 'system.methodHelp' : self.system_methodHelp}) - - def register_multicall_functions(self): - """Registers the XML-RPC multicall method in the system - namespace. - - see http://www.xmlrpc.com/discuss/msgReader$1208""" - - self.funcs.update({'system.multicall' : self.system_multicall}) - - def _marshaled_dispatch(self, data, dispatch_method=None): - """Dispatches an XML-RPC method from marshalled (XML) data. - - XML-RPC methods are dispatched from the marshalled (XML) data - using the _dispatch method and the result is returned as - marshalled data. For backwards compatibility, a dispatch - function can be provided as an argument (see comment in - SimpleXMLRPCRequestHandler.do_POST) but overriding the - existing method through subclassing is the prefered means - of changing method dispatch behavior. 
- """ - try: - params, method = xmlrpclib.loads(data) - - # generate response - if dispatch_method is not None: - response = dispatch_method(method, params) - else: - response = self._dispatch(method, params) - # wrap response in a singleton tuple - response = (response,) - response = xmlrpclib.dumps(response, methodresponse=1, - allow_none=self.allow_none, encoding=self.encoding) - except Fault, fault: - response = xmlrpclib.dumps(fault, allow_none=self.allow_none, - encoding=self.encoding) - except: - # report exception back to server - response = xmlrpclib.dumps( - xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), #@UndefinedVariable exc_value only available when we actually have an exception - encoding=self.encoding, allow_none=self.allow_none, - ) - - return response - - def system_listMethods(self): - """system.listMethods() => ['add', 'subtract', 'multiple'] - - Returns a list of the methods supported by the server.""" - - methods = self.funcs.keys() - if self.instance is not None: - # Instance can implement _listMethod to return a list of - # methods - if hasattr(self.instance, '_listMethods'): - methods = remove_duplicates( - methods + self.instance._listMethods() - ) - # if the instance has a _dispatch method then we - # don't have enough information to provide a list - # of methods - elif not hasattr(self.instance, '_dispatch'): - methods = remove_duplicates( - methods + list_public_methods(self.instance) - ) - methods.sort() - return methods - - def system_methodSignature(self, method_name): - """system.methodSignature('add') => [double, int, int] - - Returns a list describing the signature of the method. In the - above example, the add method takes two integers as arguments - and returns a double result. - - This server does NOT support system.methodSignature.""" - - # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html - - return 'signatures not supported' - - def system_methodHelp(self, method_name): - """system.methodHelp('add') => "Adds two integers together" - - Returns a string containing documentation for the specified method.""" - - method = None - if self.funcs.has_key(method_name): - method = self.funcs[method_name] - elif self.instance is not None: - # Instance can implement _methodHelp to return help for a method - if hasattr(self.instance, '_methodHelp'): - return self.instance._methodHelp(method_name) - # if the instance has a _dispatch method then we - # don't have enough information to provide help - elif not hasattr(self.instance, '_dispatch'): - try: - method = resolve_dotted_attribute( - self.instance, - method_name, - self.allow_dotted_names - ) - except AttributeError: - pass - - # Note that we aren't checking that the method actually - # be a callable object of some kind - if method is None: - return "" - else: - try: - import pydoc - except ImportError: - return "" #not there for jython - else: - return pydoc.getdoc(method) - - def system_multicall(self, call_list): - """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \ -[[4], ...] - - Allows the caller to package multiple XML-RPC calls into a single - request. - - See http://www.xmlrpc.com/discuss/msgReader$1208 - """ - - results = [] - for call in call_list: - method_name = call['methodName'] - params = call['params'] - - try: - # XXX A marshalling error in any response will fail the entire - # multicall. If someone cares they should fix this. 
- results.append([self._dispatch(method_name, params)]) - except Fault, fault: - results.append( - {'faultCode' : fault.faultCode, - 'faultString' : fault.faultString} - ) - except: - results.append( - {'faultCode' : 1, - 'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)} #@UndefinedVariable exc_value only available when we actually have an exception - ) - return results - - def _dispatch(self, method, params): - """Dispatches the XML-RPC method. - - XML-RPC calls are forwarded to a registered function that - matches the called XML-RPC method name. If no such function - exists then the call is forwarded to the registered instance, - if available. - - If the registered instance has a _dispatch method then that - method will be called with the name of the XML-RPC method and - its parameters as a tuple - e.g. instance._dispatch('add',(2,3)) - - If the registered instance does not have a _dispatch method - then the instance will be searched to find a matching method - and, if found, will be called. - - Methods beginning with an '_' are considered private and will - not be called. - """ - - func = None - try: - # check to see if a matching function has been registered - func = self.funcs[method] - except KeyError: - if self.instance is not None: - # check for a _dispatch method - if hasattr(self.instance, '_dispatch'): - return self.instance._dispatch(method, params) - else: - # call instance method directly - try: - func = resolve_dotted_attribute( - self.instance, - method, - self.allow_dotted_names - ) - except AttributeError: - pass - - if func is not None: - return func(*params) - else: - raise Exception('method "%s" is not supported' % method) - -class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): - """Simple XML-RPC request handler class. - - Handles all HTTP POST requests and attempts to decode them as - XML-RPC requests. - """ - - # Class attribute listing the accessible path components; - # paths not on this list will result in a 404 error. - rpc_paths = ('/', '/RPC2') - - def is_rpc_path_valid(self): - if self.rpc_paths: - return self.path in self.rpc_paths - else: - # If .rpc_paths is empty, just assume all paths are legal - return True - - def do_POST(self): - """Handles the HTTP POST request. - - Attempts to interpret all HTTP POST requests as XML-RPC calls, - which are forwarded to the server's _dispatch method for handling. - """ - - # Check that the path is legal - if not self.is_rpc_path_valid(): - self.report_404() - return - - try: - # Get arguments by reading body of request. - # We read this in chunks to avoid straining - # socket.read(); around the 10 or 15Mb mark, some platforms - # begin to have problems (bug #792570). - max_chunk_size = 10 * 1024 * 1024 - size_remaining = int(self.headers["content-length"]) - L = [] - while size_remaining: - chunk_size = min(size_remaining, max_chunk_size) - L.append(self.rfile.read(chunk_size)) - size_remaining -= len(L[-1]) - data = ''.join(L) - - # In previous versions of SimpleXMLRPCServer, _dispatch - # could be overridden in this class, instead of in - # SimpleXMLRPCDispatcher. To maintain backwards compatibility, - # check to see if a subclass implements _dispatch and dispatch - # using that method if present. 
- response = self.server._marshaled_dispatch( - data, getattr(self, '_dispatch', None) - ) - except: # This should only happen if the module is buggy - # internal error, report as HTTP server error - self.send_response(500) - self.end_headers() - else: - # got a valid XML RPC response - self.send_response(200) - self.send_header("Content-type", "text/xml") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - - # shut down the connection - self.wfile.flush() - self.connection.shutdown(1) - - def report_404 (self): - # Report a 404 error - self.send_response(404) - response = 'No such page' - self.send_header("Content-type", "text/plain") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - # shut down the connection - self.wfile.flush() - self.connection.shutdown(1) - - def log_request(self, code='-', size='-'): - """Selectively log an accepted request.""" - - if self.server.logRequests: - BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size) - -class SimpleXMLRPCServer(SocketServer.TCPServer, - SimpleXMLRPCDispatcher): - """Simple XML-RPC server. - - Simple XML-RPC server that allows functions and a single instance - to be installed to handle requests. The default implementation - attempts to dispatch XML-RPC calls to the functions or instance - installed in the server. Override the _dispatch method inhereted - from SimpleXMLRPCDispatcher to change this behavior. - """ - - allow_reuse_address = True - - def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, - logRequests=True, allow_none=False, encoding=None): - self.logRequests = logRequests - - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding) - SocketServer.TCPServer.__init__(self, addr, requestHandler) - - # [Bug #1222790] If possible, set close-on-exec flag; if a - # method spawns a subprocess, the subprocess shouldn't have - # the listening socket open. - if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'): - flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD) - flags |= fcntl.FD_CLOEXEC - fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags) - -class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher): - """Simple handler for XML-RPC data passed through CGI.""" - - def __init__(self, allow_none=False, encoding=None): - SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding) - - def handle_xmlrpc(self, request_text): - """Handle a single XML-RPC request""" - - response = self._marshaled_dispatch(request_text) - - sys.stdout.write('Content-Type: text/xml\n') - sys.stdout.write('Content-Length: %d\n' % len(response)) - sys.stdout.write('\n') - - sys.stdout.write(response) - - def handle_get(self): - """Handle a single HTTP GET request. - - Default implementation indicates an error because - XML-RPC uses the POST method. - """ - - code = 400 - message, explain = \ - BaseHTTPServer.BaseHTTPRequestHandler.responses[code] - - response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % { #@UndefinedVariable - 'code' : code, - 'message' : message, - 'explain' : explain - } - sys.stdout.write('Status: %d %s\n' % (code, message)) - sys.stdout.write('Content-Type: text/html\n') - sys.stdout.write('Content-Length: %d\n' % len(response)) - sys.stdout.write('\n') - - sys.stdout.write(response) - - def handle_request(self, request_text=None): - """Handle a single XML-RPC request passed through a CGI post method. - - If no XML data is given then it is read from stdin. 
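CGIXMLRPCRequestHandler survives unchanged in Python 3's xmlrpc.server as well. A minimal CGI-script sketch (the registered function is illustrative):

from xmlrpc.server import CGIXMLRPCRequestHandler

handler = CGIXMLRPCRequestHandler()
handler.register_function(pow)
handler.handle_request()  # reads the POST body from stdin, writes headers and response to stdout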
The resulting - XML-RPC response is printed to stdout along with the correct HTTP - headers. - """ - - if request_text is None and \ - os.environ.get('REQUEST_METHOD', None) == 'GET': - self.handle_get() - else: - # POST data is normally available through stdin - if request_text is None: - request_text = sys.stdin.read() - - self.handle_xmlrpc(request_text) - -if __name__ == '__main__': - sys.stdout.write('Running XML-RPC server on port 8000\n') - server = SimpleXMLRPCServer(("localhost", 8000)) - server.register_function(pow) - server.register_function(lambda x, y: x + y, 'add') - server.serve_forever() diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SocketServer.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SocketServer.py deleted file mode 100644 index 7af2777a..00000000 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_SocketServer.py +++ /dev/null @@ -1,715 +0,0 @@ -"""Generic socket server classes. - -This module tries to capture the various aspects of defining a server: - -For socket-based servers: - -- address family: - - AF_INET{,6}: IP (Internet Protocol) sockets (default) - - AF_UNIX: Unix domain sockets - - others, e.g. AF_DECNET are conceivable (see -- socket type: - - SOCK_STREAM (reliable stream, e.g. TCP) - - SOCK_DGRAM (datagrams, e.g. UDP) - -For request-based servers (including socket-based): - -- client address verification before further looking at the request - (This is actually a hook for any processing that needs to look - at the request before anything else, e.g. logging) -- how to handle multiple requests: - - synchronous (one request is handled at a time) - - forking (each request is handled by a new process) - - threading (each request is handled by a new thread) - -The classes in this module favor the server type that is simplest to -write: a synchronous TCP/IP server. This is bad class design, but -save some typing. (There's also the issue that a deep class hierarchy -slows down method lookups.) - -There are five classes in an inheritance diagram, four of which represent -synchronous servers of four types: - - +------------+ - | BaseServer | - +------------+ - | - v - +-----------+ +------------------+ - | TCPServer |------->| UnixStreamServer | - +-----------+ +------------------+ - | - v - +-----------+ +--------------------+ - | UDPServer |------->| UnixDatagramServer | - +-----------+ +--------------------+ - -Note that UnixDatagramServer derives from UDPServer, not from -UnixStreamServer -- the only difference between an IP and a Unix -stream server is the address family, which is simply repeated in both -unix server classes. - -Forking and threading versions of each type of server can be created -using the ForkingMixIn and ThreadingMixIn mix-in classes. For -instance, a threading UDP server class is created as follows: - - class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass - -The Mix-in class must come first, since it overrides a method defined -in UDPServer! Setting the various member variables also changes -the behavior of the underlying server mechanism. - -To implement a service, you must derive a class from -BaseRequestHandler and redefine its handle() method. You can then run -various versions of the service by combining one of the server classes -with your request handler class. - -The request handler class must be different for datagram or stream -services. This can be hidden by using the request handler -subclasses StreamRequestHandler or DatagramRequestHandler. - -Of course, you still have to use your head! 
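Everything this docstring describes now lives, with the same class names, in the Python 3 stdlib socketserver module, so the vendored copy can go. A minimal synchronous echo server as a sketch (address and handler are illustrative; the with-statement form needs Python 3.6+):

import socketserver

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(1024)  # for stream servers, self.request is the connected socket
        self.request.sendall(data)

with socketserver.TCPServer(("localhost", 9999), EchoHandler) as server:
    server.serve_forever()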
- -For instance, it makes no sense to use a forking server if the service -contains state in memory that can be modified by requests (since the -modifications in the child process would never reach the initial state -kept in the parent process and passed to each child). In this case, -you can use a threading server, but you will probably have to use -locks to avoid two requests that come in nearly simultaneous to apply -conflicting changes to the server state. - -On the other hand, if you are building e.g. an HTTP server, where all -data is stored externally (e.g. in the file system), a synchronous -class will essentially render the service "deaf" while one request is -being handled -- which may be for a very long time if a client is slow -to read all the data it has requested. Here a threading or forking -server is appropriate. - -In some cases, it may be appropriate to process part of a request -synchronously, but to finish processing in a forked child depending on -the request data. This can be implemented by using a synchronous -server and doing an explicit fork in the request handler class -handle() method. - -Another approach to handling multiple simultaneous requests in an -environment that supports neither threads nor fork (or where these are -too expensive or inappropriate for the service) is to maintain an -explicit table of partially finished requests and to use select() to -decide which request to work on next (or whether to handle a new -incoming request). This is particularly important for stream services -where each client can potentially be connected for a long time (if -threads or subprocesses cannot be used). - -Future work: -- Standard classes for Sun RPC (which uses either UDP or TCP) -- Standard mix-in classes to implement various authentication - and encryption schemes -- Standard framework for select-based multiplexing - -XXX Open problems: -- What to do with out-of-band data? - -BaseServer: -- split generic "request" functionality out into BaseServer class. - Copyright (C) 2000 Luke Kenneth Casson Leighton - - example: read entries from a SQL database (requires overriding - get_request() to return a table entry from the database). - entry is processed by a RequestHandlerClass. - -""" - -# Author of the BaseServer patch: Luke Kenneth Casson Leighton - -# XXX Warning! -# There is a test suite for this module, but it cannot be run by the -# standard regression test. -# To run it manually, run Lib/test/test_socketserver.py. - -__version__ = "0.4" - - -from _pydev_imps._pydev_saved_modules import socket -from _pydev_imps._pydev_saved_modules import select -import sys -import os -try: - from _pydev_imps._pydev_saved_modules import threading -except ImportError: - import dummy_threading as threading - -__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer", - "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler", - "StreamRequestHandler","DatagramRequestHandler", - "ThreadingMixIn", "ForkingMixIn"] -if hasattr(socket, "AF_UNIX"): - __all__.extend(["UnixStreamServer","UnixDatagramServer", - "ThreadingUnixStreamServer", - "ThreadingUnixDatagramServer"]) - -class BaseServer: - - """Base class for server classes. 
- - Methods for the caller: - - - __init__(server_address, RequestHandlerClass) - - serve_forever(poll_interval=0.5) - - shutdown() - - handle_request() # if you do not use serve_forever() - - fileno() -> int # for select() - - Methods that may be overridden: - - - server_bind() - - server_activate() - - get_request() -> request, client_address - - handle_timeout() - - verify_request(request, client_address) - - server_close() - - process_request(request, client_address) - - shutdown_request(request) - - close_request(request) - - handle_error() - - Methods for derived classes: - - - finish_request(request, client_address) - - Class variables that may be overridden by derived classes or - instances: - - - timeout - - address_family - - socket_type - - allow_reuse_address - - Instance variables: - - - RequestHandlerClass - - socket - - """ - - timeout = None - - def __init__(self, server_address, RequestHandlerClass): - """Constructor. May be extended, do not override.""" - self.server_address = server_address - self.RequestHandlerClass = RequestHandlerClass - self.__is_shut_down = threading.Event() # @UndefinedVariable - self.__shutdown_request = False - - def server_activate(self): - """Called by constructor to activate the server. - - May be overridden. - - """ - pass - - def serve_forever(self, poll_interval=0.5): - """Handle one request at a time until shutdown. - - Polls for shutdown every poll_interval seconds. Ignores - self.timeout. If you need to do periodic tasks, do them in - another thread. - """ - self.__is_shut_down.clear() - try: - while not self.__shutdown_request: - # XXX: Consider using another file descriptor or - # connecting to the socket to wake this up instead of - # polling. Polling reduces our responsiveness to a - # shutdown request and wastes cpu at all other times. - r, w, e = select.select([self], [], [], poll_interval) - if self in r: - self._handle_request_noblock() - finally: - self.__shutdown_request = False - self.__is_shut_down.set() - - def shutdown(self): - """Stops the serve_forever loop. - - Blocks until the loop has finished. This must be called while - serve_forever() is running in another thread, or it will - deadlock. - """ - self.__shutdown_request = True - self.__is_shut_down.wait() - - # The distinction between handling, getting, processing and - # finishing a request is fairly arbitrary. Remember: - # - # - handle_request() is the top-level call. It calls - # select, get_request(), verify_request() and process_request() - # - get_request() is different for stream or datagram sockets - # - process_request() is the place that may fork a new process - # or create a new thread to finish the request - # - finish_request() instantiates the request handler class; - # this constructor will handle the request all by itself - - def handle_request(self): - """Handle one request, possibly blocking. - - Respects self.timeout. - """ - # Support people who used socket.settimeout() to escape - # handle_request before self.timeout was available. - timeout = self.socket.gettimeout() - if timeout is None: - timeout = self.timeout - elif self.timeout is not None: - timeout = min(timeout, self.timeout) - fd_sets = select.select([self], [], [], timeout) - if not fd_sets[0]: - self.handle_timeout() - return - self._handle_request_noblock() - - def _handle_request_noblock(self): - """Handle one request, without blocking. 
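The serve_forever()/shutdown() pair above behaves the same way in Python 3's socketserver, including the documented deadlock: shutdown() must be issued from a different thread. A sketch of the usual pattern, with illustrative names:

import socketserver
import threading

server = socketserver.TCPServer(("localhost", 0), socketserver.BaseRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()

# ... the server handles requests on its own thread ...

server.shutdown()      # blocks until the serve_forever() loop exits
thread.join()
server.server_close()  # release the listening socket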
- - I assume that select.select has returned that the socket is - readable before this function was called, so there should be - no risk of blocking in get_request(). - """ - try: - request, client_address = self.get_request() - except socket.error: - return - if self.verify_request(request, client_address): - try: - self.process_request(request, client_address) - except: - self.handle_error(request, client_address) - self.shutdown_request(request) - - def handle_timeout(self): - """Called if no new request arrives within self.timeout. - - Overridden by ForkingMixIn. - """ - pass - - def verify_request(self, request, client_address): - """Verify the request. May be overridden. - - Return True if we should proceed with this request. - - """ - return True - - def process_request(self, request, client_address): - """Call finish_request. - - Overridden by ForkingMixIn and ThreadingMixIn. - - """ - self.finish_request(request, client_address) - self.shutdown_request(request) - - def server_close(self): - """Called to clean-up the server. - - May be overridden. - - """ - pass - - def finish_request(self, request, client_address): - """Finish one request by instantiating RequestHandlerClass.""" - self.RequestHandlerClass(request, client_address, self) - - def shutdown_request(self, request): - """Called to shutdown and close an individual request.""" - self.close_request(request) - - def close_request(self, request): - """Called to clean up an individual request.""" - pass - - def handle_error(self, request, client_address): - """Handle an error gracefully. May be overridden. - - The default is to print a traceback and continue. - - """ - print '-'*40 - print 'Exception happened during processing of request from', - print client_address - import traceback - traceback.print_exc() # XXX But this goes to stderr! - print '-'*40 - - -class TCPServer(BaseServer): - - """Base class for various socket-based server classes. - - Defaults to synchronous IP stream (i.e., TCP). - - Methods for the caller: - - - __init__(server_address, RequestHandlerClass, bind_and_activate=True) - - serve_forever(poll_interval=0.5) - - shutdown() - - handle_request() # if you don't use serve_forever() - - fileno() -> int # for select() - - Methods that may be overridden: - - - server_bind() - - server_activate() - - get_request() -> request, client_address - - handle_timeout() - - verify_request(request, client_address) - - process_request(request, client_address) - - shutdown_request(request) - - close_request(request) - - handle_error() - - Methods for derived classes: - - - finish_request(request, client_address) - - Class variables that may be overridden by derived classes or - instances: - - - timeout - - address_family - - socket_type - - request_queue_size (only for stream sockets) - - allow_reuse_address - - Instance variables: - - - server_address - - RequestHandlerClass - - socket - - """ - - address_family = socket.AF_INET - - socket_type = socket.SOCK_STREAM - - request_queue_size = 5 - - allow_reuse_address = False - - def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True): - """Constructor. May be extended, do not override.""" - BaseServer.__init__(self, server_address, RequestHandlerClass) - self.socket = socket.socket(self.address_family, - self.socket_type) - if bind_and_activate: - self.server_bind() - self.server_activate() - - def server_bind(self): - """Called by constructor to bind the socket. - - May be overridden. 
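verify_request() is still the canonical lightweight access-control hook in Python 3's socketserver. A sketch, with an illustrative localhost-only policy:

import socketserver

class LocalOnlyServer(socketserver.TCPServer):
    def verify_request(self, request, client_address):
        return client_address[0] == "127.0.0.1"  # reject non-local clients before handling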
- - """ - if self.allow_reuse_address: - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.socket.bind(self.server_address) - self.server_address = self.socket.getsockname() - - def server_activate(self): - """Called by constructor to activate the server. - - May be overridden. - - """ - self.socket.listen(self.request_queue_size) - - def server_close(self): - """Called to clean-up the server. - - May be overridden. - - """ - self.socket.close() - - def fileno(self): - """Return socket file number. - - Interface required by select(). - - """ - return self.socket.fileno() - - def get_request(self): - """Get the request and client address from the socket. - - May be overridden. - - """ - return self.socket.accept() - - def shutdown_request(self, request): - """Called to shutdown and close an individual request.""" - try: - #explicitly shutdown. socket.close() merely releases - #the socket and waits for GC to perform the actual close. - request.shutdown(socket.SHUT_WR) - except socket.error: - pass #some platforms may raise ENOTCONN here - self.close_request(request) - - def close_request(self, request): - """Called to clean up an individual request.""" - request.close() - - -class UDPServer(TCPServer): - - """UDP server class.""" - - allow_reuse_address = False - - socket_type = socket.SOCK_DGRAM - - max_packet_size = 8192 - - def get_request(self): - data, client_addr = self.socket.recvfrom(self.max_packet_size) - return (data, self.socket), client_addr - - def server_activate(self): - # No need to call listen() for UDP. - pass - - def shutdown_request(self, request): - # No need to shutdown anything. - self.close_request(request) - - def close_request(self, request): - # No need to close anything. - pass - -class ForkingMixIn: - - """Mix-in class to handle each request in a new process.""" - - timeout = 300 - active_children = None - max_children = 40 - - def collect_children(self): - """Internal routine to wait for children that have exited.""" - if self.active_children is None: return - while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. - try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) - - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. - for child in self.active_children: - try: - pid, status = os.waitpid(child, os.WNOHANG) # @UndefinedVariable - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) - - def handle_timeout(self): - """Wait for zombies after self.timeout seconds of inactivity. - - May be extended, do not override. 
- """ - self.collect_children() - - def process_request(self, request, client_address): - """Fork a new subprocess to process the request.""" - self.collect_children() - pid = os.fork() # @UndefinedVariable - if pid: - # Parent process - if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) - self.close_request(request) #close handle in parent process - return - else: - # Child process. - # This must never return, hence os._exit()! - try: - self.finish_request(request, client_address) - self.shutdown_request(request) - os._exit(0) - except: - try: - self.handle_error(request, client_address) - self.shutdown_request(request) - finally: - os._exit(1) - - -class ThreadingMixIn: - """Mix-in class to handle each request in a new thread.""" - - # Decides how threads will act upon termination of the - # main process - daemon_threads = False - - def process_request_thread(self, request, client_address): - """Same as in BaseServer but as a thread. - - In addition, exception handling is done here. - - """ - try: - self.finish_request(request, client_address) - self.shutdown_request(request) - except: - self.handle_error(request, client_address) - self.shutdown_request(request) - - def process_request(self, request, client_address): - """Start a new thread to process the request.""" - t = threading.Thread(target = self.process_request_thread, # @UndefinedVariable - args = (request, client_address)) - t.daemon = self.daemon_threads - t.start() - - -class ForkingUDPServer(ForkingMixIn, UDPServer): pass -class ForkingTCPServer(ForkingMixIn, TCPServer): pass - -class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass -class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass - -if hasattr(socket, 'AF_UNIX'): - - class UnixStreamServer(TCPServer): - address_family = socket.AF_UNIX # @UndefinedVariable - - class UnixDatagramServer(UDPServer): - address_family = socket.AF_UNIX # @UndefinedVariable - - class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass - - class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass - -class BaseRequestHandler: - - """Base class for request handler classes. - - This class is instantiated for each request to be handled. The - constructor sets the instance variables request, client_address - and server, and then calls the handle() method. To implement a - specific service, all you need to do is to derive a class which - defines a handle() method. - - The handle() method can find the request as self.request, the - client address as self.client_address, and the server (in case it - needs access to per-server information) as self.server. Since a - separate instance is created for each request, the handle() method - can define arbitrary other instance variariables. - - """ - - def __init__(self, request, client_address, server): - self.request = request - self.client_address = client_address - self.server = server - self.setup() - try: - self.handle() - finally: - self.finish() - - def setup(self): - pass - - def handle(self): - pass - - def finish(self): - pass - - -# The following two classes make it possible to use the same service -# class for stream or datagram servers. 
-# Each class sets up these instance variables: -# - rfile: a file object from which receives the request is read -# - wfile: a file object to which the reply is written -# When the handle() method returns, wfile is flushed properly - - -class StreamRequestHandler(BaseRequestHandler): - - """Define self.rfile and self.wfile for stream sockets.""" - - # Default buffer sizes for rfile, wfile. - # We default rfile to buffered because otherwise it could be - # really slow for large data (a getc() call per byte); we make - # wfile unbuffered because (a) often after a write() we want to - # read and we need to flush the line; (b) big writes to unbuffered - # files are typically optimized by stdio even when big reads - # aren't. - rbufsize = -1 - wbufsize = 0 - - # A timeout to apply to the request socket, if not None. - timeout = None - - # Disable nagle algorithm for this socket, if True. - # Use only when wbufsize != 0, to avoid small packets. - disable_nagle_algorithm = False - - def setup(self): - self.connection = self.request - if self.timeout is not None: - self.connection.settimeout(self.timeout) - if self.disable_nagle_algorithm: - self.connection.setsockopt(socket.IPPROTO_TCP, - socket.TCP_NODELAY, True) - self.rfile = self.connection.makefile('rb', self.rbufsize) - self.wfile = self.connection.makefile('wb', self.wbufsize) - - def finish(self): - if not self.wfile.closed: - self.wfile.flush() - self.wfile.close() - self.rfile.close() - - -class DatagramRequestHandler(BaseRequestHandler): - - # XXX Regrettably, I cannot get this working on Linux; - # s.recvfrom() doesn't return a meaningful client address. - - """Define self.rfile and self.wfile for datagram sockets.""" - - def setup(self): - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - self.packet, self.socket = self.request - self.rfile = StringIO(self.packet) - self.wfile = StringIO() - - def finish(self): - self.socket.sendto(self.wfile.getvalue(), self.client_address) diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_inspect.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_inspect.py deleted file mode 100644 index 5fd33d87..00000000 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_inspect.py +++ /dev/null @@ -1,788 +0,0 @@ -"""Get useful information from live Python objects. - -This module encapsulates the interface provided by the internal special -attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion. -It also provides some help for examining source code and class layout. - -Here are some of the useful functions provided by this module: - - ismodule(), isclass(), ismethod(), isfunction(), istraceback(), - isframe(), iscode(), isbuiltin(), isroutine() - check object types - getmembers() - get members of an object that satisfy a given condition - - getfile(), getsourcefile(), getsource() - find an object's source code - getdoc(), getcomments() - get documentation on an object - getmodule() - determine the module that an object came from - getclasstree() - arrange classes so as to represent their hierarchy - - getargspec(), getargvalues() - get info about function arguments - formatargspec(), formatargvalues() - format an argument spec - getouterframes(), getinnerframes() - get info about frames - currentframe() - get the current stack frame - stack(), trace() - get info about frames on the stack or in a traceback -""" - -# This module is in the public domain. No warranties. 
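The helpers catalogued in this docstring all have maintained counterparts in Python 3's inspect module, with inspect.signature() superseding getargspec() (the latter was removed in Python 3.11). A short sketch; the sample function is illustrative:

import inspect

def sample(a, b=1, *args, **kwargs):
    return a + b

print(inspect.isfunction(sample))          # True
print(inspect.signature(sample))           # (a, b=1, *args, **kwargs)
print(inspect.getdoc(inspect.getmembers))  # cleaned, dedented docstring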
- -__author__ = 'Ka-Ping Yee ' -__date__ = '1 Jan 2001' - -import sys, os, types, string, re, imp, tokenize - -# ----------------------------------------------------------- type-checking -def ismodule(object): - """Return true if the object is a module. - - Module objects provide these attributes: - __doc__ documentation string - __file__ filename (missing for built-in modules)""" - return isinstance(object, types.ModuleType) - -def isclass(object): - """Return true if the object is a class. - - Class objects provide these attributes: - __doc__ documentation string - __module__ name of module in which this class was defined""" - return isinstance(object, types.ClassType) or hasattr(object, '__bases__') - -def ismethod(object): - """Return true if the object is an instance method. - - Instance method objects provide these attributes: - __doc__ documentation string - __name__ name with which this method was defined - im_class class object in which this method belongs - im_func function object containing implementation of method - im_self instance to which this method is bound, or None""" - return isinstance(object, types.MethodType) - -def ismethoddescriptor(object): - """Return true if the object is a method descriptor. - - But not if ismethod() or isclass() or isfunction() are true. - - This is new in Python 2.2, and, for example, is true of int.__add__. - An object passing this test has a __get__ attribute but not a __set__ - attribute, but beyond that the set of attributes varies. __name__ is - usually sensible, and __doc__ often is. - - Methods implemented via descriptors that also pass one of the other - tests return false from the ismethoddescriptor() test, simply because - the other tests promise more -- you can, e.g., count on having the - im_func attribute (etc) when an object passes ismethod().""" - return (hasattr(object, "__get__") - and not hasattr(object, "__set__") # else it's a data descriptor - and not ismethod(object) # mutual exclusion - and not isfunction(object) - and not isclass(object)) - -def isfunction(object): - """Return true if the object is a user-defined function. - - Function objects provide these attributes: - __doc__ documentation string - __name__ name with which this function was defined - func_code code object containing compiled function bytecode - func_defaults tuple of any default values for arguments - func_doc (same as __doc__) - func_globals global namespace in which this function was defined - func_name (same as __name__)""" - return isinstance(object, types.FunctionType) - -def istraceback(object): - """Return true if the object is a traceback. - - Traceback objects provide these attributes: - tb_frame frame object at this level - tb_lasti index of last attempted instruction in bytecode - tb_lineno current line number in Python source code - tb_next next inner traceback object (called by this level)""" - return isinstance(object, types.TracebackType) - -def isframe(object): - """Return true if the object is a frame object. 
- - Frame objects provide these attributes: - f_back next outer frame object (this frame's caller) - f_builtins built-in namespace seen by this frame - f_code code object being executed in this frame - f_exc_traceback traceback if raised in this frame, or None - f_exc_type exception type if raised in this frame, or None - f_exc_value exception value if raised in this frame, or None - f_globals global namespace seen by this frame - f_lasti index of last attempted instruction in bytecode - f_lineno current line number in Python source code - f_locals local namespace seen by this frame - f_restricted 0 or 1 if frame is in restricted execution mode - f_trace tracing function for this frame, or None""" - return isinstance(object, types.FrameType) - -def iscode(object): - """Return true if the object is a code object. - - Code objects provide these attributes: - co_argcount number of arguments (not including * or ** args) - co_code string of raw compiled bytecode - co_consts tuple of constants used in the bytecode - co_filename name of file in which this code object was created - co_firstlineno number of first line in Python source code - co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg - co_lnotab encoded mapping of line numbers to bytecode indices - co_name name with which this code object was defined - co_names tuple of names of local variables - co_nlocals number of local variables - co_stacksize virtual machine stack space required - co_varnames tuple of names of arguments and local variables""" - return isinstance(object, types.CodeType) - -def isbuiltin(object): - """Return true if the object is a built-in function or method. - - Built-in functions and methods provide these attributes: - __doc__ documentation string - __name__ original name of this function or method - __self__ instance to which a method is bound, or None""" - return isinstance(object, types.BuiltinFunctionType) - -def isroutine(object): - """Return true if the object is any kind of function or method.""" - return (isbuiltin(object) - or isfunction(object) - or ismethod(object) - or ismethoddescriptor(object)) - -def getmembers(object, predicate=None): - """Return all members of an object as (name, value) pairs sorted by name. - Optionally, only return members that satisfy a given predicate.""" - results = [] - for key in dir(object): - value = getattr(object, key) - if not predicate or predicate(value): - results.append((key, value)) - results.sort() - return results - -def classify_class_attrs(cls): - """Return list of attribute-descriptor tuples. - - For each name in dir(cls), the return list contains a 4-tuple - with these elements: - - 0. The name (a string). - - 1. The kind of attribute this is, one of these strings: - 'class method' created via classmethod() - 'static method' created via staticmethod() - 'property' created via property() - 'method' any other flavor of method - 'data' not a method - - 2. The class which defined this attribute (a class). - - 3. The object as obtained directly from the defining class's - __dict__, not via getattr. This is especially important for - data attributes: C.data is just a data object, but - C.__dict__['data'] may be a data descriptor with additional - info, like a __doc__ string. - """ - - mro = getmro(cls) - names = dir(cls) - result = [] - for name in names: - # Get the object associated with the name. - # Getting an obj from the __dict__ sometimes reveals more than - # using getattr. Static and class methods are dramatic examples. 
- if name in cls.__dict__: - obj = cls.__dict__[name] - else: - obj = getattr(cls, name) - - # Figure out where it was defined. - homecls = getattr(obj, "__objclass__", None) - if homecls is None: - # search the dicts. - for base in mro: - if name in base.__dict__: - homecls = base - break - - # Get the object again, in order to get it from the defining - # __dict__ instead of via getattr (if possible). - if homecls is not None and name in homecls.__dict__: - obj = homecls.__dict__[name] - - # Also get the object via getattr. - obj_via_getattr = getattr(cls, name) - - # Classify the object. - if isinstance(obj, staticmethod): - kind = "static method" - elif isinstance(obj, classmethod): - kind = "class method" - elif isinstance(obj, property): - kind = "property" - elif (ismethod(obj_via_getattr) or - ismethoddescriptor(obj_via_getattr)): - kind = "method" - else: - kind = "data" - - result.append((name, kind, homecls, obj)) - - return result - -# ----------------------------------------------------------- class helpers -def _searchbases(cls, accum): - # Simulate the "classic class" search order. - if cls in accum: - return - accum.append(cls) - for base in cls.__bases__: - _searchbases(base, accum) - -def getmro(cls): - "Return tuple of base classes (including cls) in method resolution order." - if hasattr(cls, "__mro__"): - return cls.__mro__ - else: - result = [] - _searchbases(cls, result) - return tuple(result) - -# -------------------------------------------------- source code extraction -def indentsize(line): - """Return the indent size, in spaces, at the start of a line of text.""" - expline = string.expandtabs(line) - return len(expline) - len(string.lstrip(expline)) - -def getdoc(object): - """Get the documentation string for an object. - - All tabs are expanded to spaces. 
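classify_class_attrs() made it into Python 3's inspect as well, returning named tuples in place of the plain 4-tuples built above. A sketch with an illustrative class:

import inspect

class C:
    x = 1

    @staticmethod
    def s():
        pass

for attr in inspect.classify_class_attrs(C):
    if attr.defining_class is C:
        print(attr.name, attr.kind)  # 'x' data, 's' static method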
To clean up docstrings that are - indented to line up with blocks of code, any whitespace than can be - uniformly removed from the second line onwards is removed.""" - try: - doc = object.__doc__ - except AttributeError: - return None - if not isinstance(doc, (str, unicode)): - return None - try: - lines = string.split(string.expandtabs(doc), '\n') - except UnicodeError: - return None - else: - margin = None - for line in lines[1:]: - content = len(string.lstrip(line)) - if not content: continue - indent = len(line) - content - if margin is None: margin = indent - else: margin = min(margin, indent) - if margin is not None: - for i in range(1, len(lines)): lines[i] = lines[i][margin:] - return string.join(lines, '\n') - -def getfile(object): - """Work out which source or compiled file an object was defined in.""" - if ismodule(object): - if hasattr(object, '__file__'): - return object.__file__ - raise TypeError, 'arg is a built-in module' - if isclass(object): - object = sys.modules.get(object.__module__) - if hasattr(object, '__file__'): - return object.__file__ - raise TypeError, 'arg is a built-in class' - if ismethod(object): - object = object.im_func - if isfunction(object): - object = object.func_code - if istraceback(object): - object = object.tb_frame - if isframe(object): - object = object.f_code - if iscode(object): - return object.co_filename - raise TypeError, 'arg is not a module, class, method, ' \ - 'function, traceback, frame, or code object' - -def getmoduleinfo(path): - """Get the module name, suffix, mode, and module type for a given file.""" - filename = os.path.basename(path) - suffixes = map(lambda (suffix, mode, mtype): - (-len(suffix), suffix, mode, mtype), imp.get_suffixes()) - suffixes.sort() # try longest suffixes first, in case they overlap - for neglen, suffix, mode, mtype in suffixes: - if filename[neglen:] == suffix: - return filename[:neglen], suffix, mode, mtype - -def getmodulename(path): - """Return the module name for a given file, or None.""" - info = getmoduleinfo(path) - if info: return info[0] - -def getsourcefile(object): - """Return the Python source file an object was defined in, if it exists.""" - filename = getfile(object) - if string.lower(filename[-4:]) in ['.pyc', '.pyo']: - filename = filename[:-4] + '.py' - for suffix, mode, kind in imp.get_suffixes(): - if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix: - # Looks like a binary file. We want to only return a text file. - return None - if os.path.exists(filename): - return filename - -def getabsfile(object): - """Return an absolute path to the source or compiled file for an object. 
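getfile(), getsourcefile() and getmodule() carry over to Python 3's inspect with the same semantics (modulo the changed .pyc layout). A quick sketch against a stdlib function:

import inspect
import json

print(inspect.getsourcefile(json.dumps))  # path to json/__init__.py
print(inspect.getmodule(json.dumps))      # <module 'json' from '...'>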
- - The idea is for each object to have a unique origin, so this routine - normalizes the result as much as possible.""" - return os.path.normcase( - os.path.abspath(getsourcefile(object) or getfile(object))) - -modulesbyfile = {} - -def getmodule(object): - """Return the module an object was defined in, or None if not found.""" - if ismodule(object): - return object - if isclass(object): - return sys.modules.get(object.__module__) - try: - file = getabsfile(object) - except TypeError: - return None - if modulesbyfile.has_key(file): - return sys.modules[modulesbyfile[file]] - for module in sys.modules.values(): - if hasattr(module, '__file__'): - modulesbyfile[getabsfile(module)] = module.__name__ - if modulesbyfile.has_key(file): - return sys.modules[modulesbyfile[file]] - main = sys.modules['__main__'] - if hasattr(main, object.__name__): - mainobject = getattr(main, object.__name__) - if mainobject is object: - return main - builtin = sys.modules['__builtin__'] - if hasattr(builtin, object.__name__): - builtinobject = getattr(builtin, object.__name__) - if builtinobject is object: - return builtin - -def findsource(object): - """Return the entire source file and starting line number for an object. - - The argument may be a module, class, method, function, traceback, frame, - or code object. The source code is returned as a list of all the lines - in the file and the line number indexes a line in that list. An IOError - is raised if the source code cannot be retrieved.""" - try: - file = open(getsourcefile(object)) - except (TypeError, IOError): - raise IOError, 'could not get source code' - lines = file.readlines() - file.close() - - if ismodule(object): - return lines, 0 - - if isclass(object): - name = object.__name__ - pat = re.compile(r'^\s*class\s*' + name + r'\b') - for i in range(len(lines)): - if pat.match(lines[i]): return lines, i - else: raise IOError, 'could not find class definition' - - if ismethod(object): - object = object.im_func - if isfunction(object): - object = object.func_code - if istraceback(object): - object = object.tb_frame - if isframe(object): - object = object.f_code - if iscode(object): - if not hasattr(object, 'co_firstlineno'): - raise IOError, 'could not find function definition' - lnum = object.co_firstlineno - 1 - pat = re.compile(r'^(\s*def\s)|(.*\slambda(:|\s))') - while lnum > 0: - if pat.match(lines[lnum]): break - lnum = lnum - 1 - return lines, lnum - raise IOError, 'could not find code object' - -def getcomments(object): - """Get lines of comments immediately preceding an object's source code.""" - try: lines, lnum = findsource(object) - except IOError: return None - - if ismodule(object): - # Look for a comment block at the top of the file. - start = 0 - if lines and lines[0][:2] == '#!': start = 1 - while start < len(lines) and string.strip(lines[start]) in ['', '#']: - start = start + 1 - if start < len(lines) and lines[start][:1] == '#': - comments = [] - end = start - while end < len(lines) and lines[end][:1] == '#': - comments.append(string.expandtabs(lines[end])) - end = end + 1 - return string.join(comments, '') - - # Look for a preceding block of comments at the same indentation. 
- elif lnum > 0: - indent = indentsize(lines[lnum]) - end = lnum - 1 - if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \ - indentsize(lines[end]) == indent: - comments = [string.lstrip(string.expandtabs(lines[end]))] - if end > 0: - end = end - 1 - comment = string.lstrip(string.expandtabs(lines[end])) - while comment[:1] == '#' and indentsize(lines[end]) == indent: - comments[:0] = [comment] - end = end - 1 - if end < 0: break - comment = string.lstrip(string.expandtabs(lines[end])) - while comments and string.strip(comments[0]) == '#': - comments[:1] = [] - while comments and string.strip(comments[-1]) == '#': - comments[-1:] = [] - return string.join(comments, '') - -class ListReader: - """Provide a readline() method to return lines from a list of strings.""" - def __init__(self, lines): - self.lines = lines - self.index = 0 - - def readline(self): - i = self.index - if i < len(self.lines): - self.index = i + 1 - return self.lines[i] - else: return '' - -class EndOfBlock(Exception): pass - -class BlockFinder: - """Provide a tokeneater() method to detect the end of a code block.""" - def __init__(self): - self.indent = 0 - self.started = 0 - self.last = 0 - - def tokeneater(self, type, token, (srow, scol), (erow, ecol), line): - if not self.started: - if type == tokenize.NAME: self.started = 1 - elif type == tokenize.NEWLINE: - self.last = srow - elif type == tokenize.INDENT: - self.indent = self.indent + 1 - elif type == tokenize.DEDENT: - self.indent = self.indent - 1 - if self.indent == 0: raise EndOfBlock, self.last - elif type == tokenize.NAME and scol == 0: - raise EndOfBlock, self.last - -def getblock(lines): - """Extract the block of code at the top of the given list of lines.""" - try: - tokenize.tokenize(ListReader(lines).readline, BlockFinder().tokeneater) - except EndOfBlock, eob: - return lines[:eob.args[0]] - # Fooling the indent/dedent logic implies a one-line definition - return lines[:1] - -def getsourcelines(object): - """Return a list of source lines and starting line number for an object. - - The argument may be a module, class, method, function, traceback, frame, - or code object. The source code is returned as a list of the lines - corresponding to the object and the line number indicates where in the - original source file the first line of code was found. An IOError is - raised if the source code cannot be retrieved.""" - lines, lnum = findsource(object) - - if ismodule(object): return lines, 0 - else: return getblock(lines[lnum:]), lnum + 1 - -def getsource(object): - """Return the text of the source code for an object. - - The argument may be a module, class, method, function, traceback, frame, - or code object. The source code is returned as a single string. An - IOError is raised if the source code cannot be retrieved.""" - lines, lnum = getsourcelines(object) - return string.join(lines, '') - -# --------------------------------------------------- class tree extraction -def walktree(classes, children, parent): - """Recursive helper function for getclasstree().""" - results = [] - classes.sort(lambda a, b: cmp(a.__name__, b.__name__)) - for c in classes: - results.append((c, c.__bases__)) - if children.has_key(c): - results.append(walktree(children[c], children, c)) - return results - -def getclasstree(classes, unique=0): - """Arrange the given list of classes into a hierarchy of nested lists. - - Where a nested list appears, it contains classes derived from the class - whose entry immediately precedes the list. 
Each entry is a 2-tuple - containing a class and a tuple of its base classes. If the 'unique' - argument is true, exactly one entry appears in the returned structure - for each class in the given list. Otherwise, classes using multiple - inheritance and their descendants will appear multiple times.""" - children = {} - roots = [] - for c in classes: - if c.__bases__: - for parent in c.__bases__: - if not children.has_key(parent): - children[parent] = [] - children[parent].append(c) - if unique and parent in classes: break - elif c not in roots: - roots.append(c) - for parent in children.keys(): - if parent not in classes: - roots.append(parent) - return walktree(roots, children, None) - -# ------------------------------------------------ argument list extraction -# These constants are from Python's compile.h. -CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 - -def getargs(co): - """Get information about the arguments accepted by a code object. - - Three things are returned: (args, varargs, varkw), where 'args' is - a list of argument names (possibly containing nested lists), and - 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" - if not iscode(co): raise TypeError, 'arg is not a code object' - - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - step = 0 - - # The following acrobatics are for anonymous (tuple) arguments. - if not sys.platform.startswith('java'):#Jython doesn't have co_code - code = co.co_code - import dis - for i in range(nargs): - if args[i][:1] in ['', '.']: - stack, remain, count = [], [], [] - while step < len(code): - op = ord(code[step]) - step = step + 1 - if op >= dis.HAVE_ARGUMENT: - opname = dis.opname[op] - value = ord(code[step]) + ord(code[step + 1]) * 256 - step = step + 2 - if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']: - remain.append(value) - count.append(value) - elif opname == 'STORE_FAST': - stack.append(names[value]) - remain[-1] = remain[-1] - 1 - while remain[-1] == 0: - remain.pop() - size = count.pop() - stack[-size:] = [stack[-size:]] - if not remain: break - remain[-1] = remain[-1] - 1 - if not remain: break - args[i] = stack[0] - - varargs = None - if co.co_flags & CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - return args, varargs, varkw - -def getargspec(func): - """Get the names and default values of a function's arguments. - - A tuple of four things is returned: (args, varargs, varkw, defaults). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. - 'defaults' is an n-tuple of the default values of the last n arguments.""" - if ismethod(func): - func = func.im_func - if not isfunction(func): raise TypeError, 'arg is not a Python function' - args, varargs, varkw = getargs(func.func_code) - return args, varargs, varkw, func.func_defaults - -def getargvalues(frame): - """Get information about arguments passed into a particular frame. - - A tuple of four things is returned: (args, varargs, varkw, locals). - 'args' is a list of the argument names (it may contain nested lists). - 'varargs' and 'varkw' are the names of the * and ** arguments or None. 
- 'locals' is the locals dictionary of the given frame.""" - args, varargs, varkw = getargs(frame.f_code) - return args, varargs, varkw, frame.f_locals - -def joinseq(seq): - if len(seq) == 1: - return '(' + seq[0] + ',)' - else: - return '(' + string.join(seq, ', ') + ')' - -def strseq(object, convert, join=joinseq): - """Recursively walk a sequence, stringifying each element.""" - if type(object) in [types.ListType, types.TupleType]: - return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object)) - else: - return convert(object) - -def formatargspec(args, varargs=None, varkw=None, defaults=None, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargspec. - - The first four arguments are (args, varargs, varkw, defaults). The - other four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments.""" - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i in range(len(args)): - spec = strseq(args[i], formatarg, join) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs: - specs.append(formatvarargs(varargs)) - if varkw: - specs.append(formatvarkw(varkw)) - return '(' + string.join(specs, ', ') + ')' - -def formatargvalues(args, varargs, varkw, locals, - formatarg=str, - formatvarargs=lambda name: '*' + name, - formatvarkw=lambda name: '**' + name, - formatvalue=lambda value: '=' + repr(value), - join=joinseq): - """Format an argument spec from the 4 values returned by getargvalues. - - The first four arguments are (args, varargs, varkw, locals). The - next four arguments are the corresponding optional formatting functions - that are called to turn names and values into strings. The ninth - argument is an optional function to format the sequence of arguments.""" - def convert(name, locals=locals, - formatarg=formatarg, formatvalue=formatvalue): - return formatarg(name) + formatvalue(locals[name]) - specs = [] - for i in range(len(args)): - specs.append(strseq(args[i], convert, join)) - if varargs: - specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) - if varkw: - specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) - return '(' + string.join(specs, ', ') + ')' - -# -------------------------------------------------- stack frame extraction -def getframeinfo(frame, context=1): - """Get information about a frame or traceback object. - - A tuple of five things is returned: the filename, the line number of - the current line, the function name, a list of lines of context from - the source code, and the index of the current line within that list. 
- The optional second argument specifies the number of lines of context - to return, which are centered around the current line.""" - raise NotImplementedError -# if istraceback(frame): -# frame = frame.tb_frame -# if not isframe(frame): -# raise TypeError, 'arg is not a frame or traceback object' -# -# filename = getsourcefile(frame) -# lineno = getlineno(frame) -# if context > 0: -# start = lineno - 1 - context//2 -# try: -# lines, lnum = findsource(frame) -# except IOError: -# lines = index = None -# else: -# start = max(start, 1) -# start = min(start, len(lines) - context) -# lines = lines[start:start+context] -# index = lineno - 1 - start -# else: -# lines = index = None -# -# return (filename, lineno, frame.f_code.co_name, lines, index) - -def getlineno(frame): - """Get the line number from a frame object, allowing for optimization.""" - # Written by Marc-Andr Lemburg; revised by Jim Hugunin and Fredrik Lundh. - lineno = frame.f_lineno - code = frame.f_code - if hasattr(code, 'co_lnotab'): - table = code.co_lnotab - lineno = code.co_firstlineno - addr = 0 - for i in range(0, len(table), 2): - addr = addr + ord(table[i]) - if addr > frame.f_lasti: break - lineno = lineno + ord(table[i + 1]) - return lineno - -def getouterframes(frame, context=1): - """Get a list of records for a frame and all higher (calling) frames. - - Each record contains a frame object, filename, line number, function - name, a list of lines of context, and index within the context.""" - framelist = [] - while frame: - framelist.append((frame,) + getframeinfo(frame, context)) - frame = frame.f_back - return framelist - -def getinnerframes(tb, context=1): - """Get a list of records for a traceback's frame and all lower frames. - - Each record contains a frame object, filename, line number, function - name, a list of lines of context, and index within the context.""" - framelist = [] - while tb: - framelist.append((tb.tb_frame,) + getframeinfo(tb, context)) - tb = tb.tb_next - return framelist - -def currentframe(): - """Return the frame object for the caller's stack frame.""" - try: - raise 'catch me' - except: - return sys.exc_traceback.tb_frame.f_back #@UndefinedVariable - -if hasattr(sys, '_getframe'): currentframe = sys._getframe - -def stack(context=1): - """Return a list of records for the stack above the caller's frame.""" - return getouterframes(currentframe().f_back, context) - -def trace(context=1): - """Return a list of records for the stack below the current exception.""" - return getinnerframes(sys.exc_traceback, context) #@UndefinedVariable diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_pkgutil_old.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_pkgutil_old.py deleted file mode 100644 index ce072ec9..00000000 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_pkgutil_old.py +++ /dev/null @@ -1,591 +0,0 @@ -"""Utilities to support packages.""" - -# NOTE: This module must remain compatible with Python 2.3, as it is shared -# by setuptools for distribution with Python 2.3 and up. 
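For reference: the vendored _pydev_inspect.py deleted above and the _pydev_pkgutil_old.py deleted below existed only for Python 2 and Jython; on Python 3 the standard library covers the same ground. A minimal sketch of the stdlib replacements, assuming nothing beyond the standard library (illustrative only, not part of the patch):

    import inspect
    import pkgutil

    # inspect.getsourcelines()/getsourcefile() replace the deleted copies above
    lines, lnum = inspect.getsourcelines(pkgutil.walk_packages)
    print(inspect.getsourcefile(pkgutil), lnum)

    # pkgutil.iter_modules() replaces the imp-based ImpImporter emulation below
    for finder, name, ispkg in pkgutil.iter_modules():
        pass  # one ModuleInfo entry per top-level module on sys.path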
- -import os -import sys -import imp -import os.path -from types import ModuleType - -__all__ = [ - 'get_importer', 'iter_importers', 'get_loader', 'find_loader', - 'walk_packages', 'iter_modules', 'get_data', - 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path', -] - -def read_code(stream): - # This helper is needed in order for the PEP 302 emulation to - # correctly handle compiled files - import marshal - - magic = stream.read(4) - if magic != imp.get_magic(): - return None - - stream.read(4) # Skip timestamp - return marshal.load(stream) - - -def simplegeneric(func): - """Make a trivial single-dispatch generic function""" - registry = {} - def wrapper(*args, **kw): - ob = args[0] - try: - cls = ob.__class__ - except AttributeError: - cls = type(ob) - try: - mro = cls.__mro__ - except AttributeError: - try: - class cls(cls, object): - pass - mro = cls.__mro__[1:] - except TypeError: - mro = object, # must be an ExtensionClass or some such :( - for t in mro: - if t in registry: - return registry[t](*args, **kw) - else: - return func(*args, **kw) - try: - wrapper.__name__ = func.__name__ - except (TypeError, AttributeError): - pass # Python 2.3 doesn't allow functions to be renamed - - def register(typ, func=None): - if func is None: - return lambda f: register(typ, f) - registry[typ] = func - return func - - wrapper.__dict__ = func.__dict__ - wrapper.__doc__ = func.__doc__ - wrapper.register = register - return wrapper - - -def walk_packages(path=None, prefix='', onerror=None): - """Yields (module_loader, name, ispkg) for all modules recursively - on path, or, if path is None, all accessible modules. - - 'path' should be either None or a list of paths to look for - modules in. - - 'prefix' is a string to output on the front of every module name - on output. - - Note that this function must import all *packages* (NOT all - modules!) on the given path, in order to access the __path__ - attribute to find submodules. - - 'onerror' is a function which gets called with one argument (the - name of the package which was being imported) if any exception - occurs while trying to import a package. If no onerror function is - supplied, ImportErrors are caught and ignored, while all other - exceptions are propagated, terminating the search. - - Examples: - - # list all modules python can access - walk_packages() - - # list all submodules of ctypes - walk_packages(ctypes.__path__, ctypes.__name__+'.') - """ - - def seen(p, m={}): - if p in m: - return True - m[p] = True - - for importer, name, ispkg in iter_modules(path, prefix): - yield importer, name, ispkg - - if ispkg: - try: - __import__(name) - except ImportError: - if onerror is not None: - onerror(name) - except Exception: - if onerror is not None: - onerror(name) - else: - raise - else: - path = getattr(sys.modules[name], '__path__', None) or [] - - # don't traverse path items we've seen before - path = [p for p in path if not seen(p)] - - for item in walk_packages(path, name+'.', onerror): - yield item - - -def iter_modules(path=None, prefix=''): - """Yields (module_loader, name, ispkg) for all submodules on path, - or, if path is None, all top-level modules on sys.path. - - 'path' should be either None or a list of paths to look for - modules in. - - 'prefix' is a string to output on the front of every module name - on output. 
- """ - - if path is None: - importers = iter_importers() - else: - importers = map(get_importer, path) - - yielded = {} - for i in importers: - for name, ispkg in iter_importer_modules(i, prefix): - if name not in yielded: - yielded[name] = 1 - yield i, name, ispkg - - -#@simplegeneric -def iter_importer_modules(importer, prefix=''): - if not hasattr(importer, 'iter_modules'): - return [] - return importer.iter_modules(prefix) - -iter_importer_modules = simplegeneric(iter_importer_modules) - - -class ImpImporter: - """PEP 302 Importer that wraps Python's "classic" import algorithm - - ImpImporter(dirname) produces a PEP 302 importer that searches that - directory. ImpImporter(None) produces a PEP 302 importer that searches - the current sys.path, plus any modules that are frozen or built-in. - - Note that ImpImporter does not currently support being used by placement - on sys.meta_path. - """ - - def __init__(self, path=None): - self.path = path - - def find_module(self, fullname, path=None): - # Note: we ignore 'path' argument since it is only used via meta_path - subname = fullname.split(".")[-1] - if subname != fullname and self.path is None: - return None - if self.path is None: - path = None - else: - path = [os.path.realpath(self.path)] - try: - file, filename, etc = imp.find_module(subname, path) - except ImportError: - return None - return ImpLoader(fullname, file, filename, etc) - - def iter_modules(self, prefix=''): - if self.path is None or not os.path.isdir(self.path): - return - - yielded = {} - import inspect - try: - filenames = os.listdir(self.path) - except OSError: - # ignore unreadable directories like import does - filenames = [] - filenames.sort() # handle packages before same-named modules - - for fn in filenames: - modname = inspect.getmodulename(fn) - if modname=='__init__' or modname in yielded: - continue - - path = os.path.join(self.path, fn) - ispkg = False - - if not modname and os.path.isdir(path) and '.' not in fn: - modname = fn - try: - dircontents = os.listdir(path) - except OSError: - # ignore unreadable directories like import does - dircontents = [] - for fn in dircontents: - subname = inspect.getmodulename(fn) - if subname=='__init__': - ispkg = True - break - else: - continue # not a package - - if modname and '.' not in modname: - yielded[modname] = 1 - yield prefix + modname, ispkg - - -class ImpLoader: - """PEP 302 Loader that wraps Python's "classic" import algorithm - """ - code = source = None - - def __init__(self, fullname, file, filename, etc): - self.file = file - self.filename = filename - self.fullname = fullname - self.etc = etc - - def load_module(self, fullname): - self._reopen() - try: - mod = imp.load_module(fullname, self.file, self.filename, self.etc) - finally: - if self.file: - self.file.close() - # Note: we don't set __loader__ because we want the module to look - # normal; i.e. 
this is just a wrapper for standard import machinery - return mod - - def get_data(self, pathname): - return open(pathname, "rb").read() - - def _reopen(self): - if self.file and self.file.closed: - mod_type = self.etc[2] - if mod_type==imp.PY_SOURCE: - self.file = open(self.filename, 'rU') - elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION): - self.file = open(self.filename, 'rb') - - def _fix_name(self, fullname): - if fullname is None: - fullname = self.fullname - elif fullname != self.fullname: - raise ImportError("Loader for module %s cannot handle " - "module %s" % (self.fullname, fullname)) - return fullname - - def is_package(self, fullname): - fullname = self._fix_name(fullname) - return self.etc[2]==imp.PKG_DIRECTORY - - def get_code(self, fullname=None): - fullname = self._fix_name(fullname) - if self.code is None: - mod_type = self.etc[2] - if mod_type==imp.PY_SOURCE: - source = self.get_source(fullname) - self.code = compile(source, self.filename, 'exec') - elif mod_type==imp.PY_COMPILED: - self._reopen() - try: - self.code = read_code(self.file) - finally: - self.file.close() - elif mod_type==imp.PKG_DIRECTORY: - self.code = self._get_delegate().get_code() - return self.code - - def get_source(self, fullname=None): - fullname = self._fix_name(fullname) - if self.source is None: - mod_type = self.etc[2] - if mod_type==imp.PY_SOURCE: - self._reopen() - try: - self.source = self.file.read() - finally: - self.file.close() - elif mod_type==imp.PY_COMPILED: - if os.path.exists(self.filename[:-1]): - f = open(self.filename[:-1], 'rU') - self.source = f.read() - f.close() - elif mod_type==imp.PKG_DIRECTORY: - self.source = self._get_delegate().get_source() - return self.source - - - def _get_delegate(self): - return ImpImporter(self.filename).find_module('__init__') - - def get_filename(self, fullname=None): - fullname = self._fix_name(fullname) - mod_type = self.etc[2] - if self.etc[2]==imp.PKG_DIRECTORY: - return self._get_delegate().get_filename() - elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION): - return self.filename - return None - - -try: - import zipimport - from zipimport import zipimporter - - def iter_zipimport_modules(importer, prefix=''): - dirlist = zipimport._zip_directory_cache[importer.archive].keys() - dirlist.sort() - _prefix = importer.prefix - plen = len(_prefix) - yielded = {} - import inspect - for fn in dirlist: - if not fn.startswith(_prefix): - continue - - fn = fn[plen:].split(os.sep) - - if len(fn)==2 and fn[1].startswith('__init__.py'): - if fn[0] not in yielded: - yielded[fn[0]] = 1 - yield fn[0], True - - if len(fn)!=1: - continue - - modname = inspect.getmodulename(fn[0]) - if modname=='__init__': - continue - - if modname and '.' not in modname and modname not in yielded: - yielded[modname] = 1 - yield prefix + modname, False - - iter_importer_modules.register(zipimporter, iter_zipimport_modules) - -except ImportError: - pass - - -def get_importer(path_item): - """Retrieve a PEP 302 importer for the given path item - - The returned importer is cached in sys.path_importer_cache - if it was newly created by a path hook. - - If there is no importer, a wrapper around the basic import - machinery is returned. This wrapper is never inserted into - the importer cache (None is inserted instead). - - The cache (or part of it) can be cleared manually if a - rescan of sys.path_hooks is necessary. 
- """ - try: - importer = sys.path_importer_cache[path_item] - except KeyError: - for path_hook in sys.path_hooks: - try: - importer = path_hook(path_item) - break - except ImportError: - pass - else: - importer = None - sys.path_importer_cache.setdefault(path_item, importer) - - if importer is None: - try: - importer = ImpImporter(path_item) - except ImportError: - importer = None - return importer - - -def iter_importers(fullname=""): - """Yield PEP 302 importers for the given module name - - If fullname contains a '.', the importers will be for the package - containing fullname, otherwise they will be importers for sys.meta_path, - sys.path, and Python's "classic" import machinery, in that order. If - the named module is in a package, that package is imported as a side - effect of invoking this function. - - Non PEP 302 mechanisms (e.g. the Windows registry) used by the - standard import machinery to find files in alternative locations - are partially supported, but are searched AFTER sys.path. Normally, - these locations are searched BEFORE sys.path, preventing sys.path - entries from shadowing them. - - For this to cause a visible difference in behaviour, there must - be a module or package name that is accessible via both sys.path - and one of the non PEP 302 file system mechanisms. In this case, - the emulation will find the former version, while the builtin - import mechanism will find the latter. - - Items of the following types can be affected by this discrepancy: - imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY - """ - if fullname.startswith('.'): - raise ImportError("Relative module names not supported") - if '.' in fullname: - # Get the containing package's __path__ - pkg = '.'.join(fullname.split('.')[:-1]) - if pkg not in sys.modules: - __import__(pkg) - path = getattr(sys.modules[pkg], '__path__', None) or [] - else: - for importer in sys.meta_path: - yield importer - path = sys.path - for item in path: - yield get_importer(item) - if '.' not in fullname: - yield ImpImporter() - -def get_loader(module_or_name): - """Get a PEP 302 "loader" object for module_or_name - - If the module or package is accessible via the normal import - mechanism, a wrapper around the relevant part of that machinery - is returned. Returns None if the module cannot be found or imported. - If the named module is not already imported, its containing package - (if any) is imported, in order to establish the package __path__. - - This function uses iter_importers(), and is thus subject to the same - limitations regarding platform-specific special import locations such - as the Windows registry. - """ - if module_or_name in sys.modules: - module_or_name = sys.modules[module_or_name] - if isinstance(module_or_name, ModuleType): - module = module_or_name - loader = getattr(module, '__loader__', None) - if loader is not None: - return loader - fullname = module.__name__ - else: - fullname = module_or_name - return find_loader(fullname) - -def find_loader(fullname): - """Find a PEP 302 "loader" object for fullname - - If fullname contains dots, path must be the containing package's __path__. - Returns None if the module cannot be found or imported. This function uses - iter_importers(), and is thus subject to the same limitations regarding - platform-specific special import locations such as the Windows registry. 
- """ - for importer in iter_importers(fullname): - loader = importer.find_module(fullname) - if loader is not None: - return loader - - return None - - -def extend_path(path, name): - """Extend a package's path. - - Intended use is to place the following code in a package's __init__.py: - - from pkgutil import extend_path - __path__ = extend_path(__path__, __name__) - - This will add to the package's __path__ all subdirectories of - directories on sys.path named after the package. This is useful - if one wants to distribute different parts of a single logical - package as multiple directories. - - It also looks for *.pkg files beginning where * matches the name - argument. This feature is similar to *.pth files (see site.py), - except that it doesn't special-case lines starting with 'import'. - A *.pkg file is trusted at face value: apart from checking for - duplicates, all entries found in a *.pkg file are added to the - path, regardless of whether they are exist the filesystem. (This - is a feature.) - - If the input path is not a list (as is the case for frozen - packages) it is returned unchanged. The input path is not - modified; an extended copy is returned. Items are only appended - to the copy at the end. - - It is assumed that sys.path is a sequence. Items of sys.path that - are not (unicode or 8-bit) strings referring to existing - directories are ignored. Unicode items of sys.path that cause - errors when used as filenames may cause this function to raise an - exception (in line with os.path.isdir() behavior). - """ - - if not isinstance(path, list): - # This could happen e.g. when this is called from inside a - # frozen package. Return the path unchanged in that case. - return path - - pname = os.path.join(*name.split('.')) # Reconstitute as relative path - # Just in case os.extsep != '.' - sname = os.extsep.join(name.split('.')) - sname_pkg = sname + os.extsep + "pkg" - init_py = "__init__" + os.extsep + "py" - - path = path[:] # Start with a copy of the existing path - - for dir in sys.path: - if not isinstance(dir, basestring) or not os.path.isdir(dir): - continue - subdir = os.path.join(dir, pname) - # XXX This may still add duplicate entries to path on - # case-insensitive filesystems - initfile = os.path.join(subdir, init_py) - if subdir not in path and os.path.isfile(initfile): - path.append(subdir) - # XXX Is this the right thing for subpackages like zope.app? - # It looks for a file named "zope.app.pkg" - pkgfile = os.path.join(dir, sname_pkg) - if os.path.isfile(pkgfile): - try: - f = open(pkgfile) - except IOError, msg: - sys.stderr.write("Can't open %s: %s\n" % - (pkgfile, msg)) - else: - for line in f: - line = line.rstrip('\n') - if not line or line.startswith('#'): - continue - path.append(line) # Don't check for existence! - f.close() - - return path - -def get_data(package, resource): - """Get a resource from a package. - - This is a wrapper round the PEP 302 loader get_data API. The package - argument should be the name of a package, in standard module format - (foo.bar). The resource argument should be in the form of a relative - filename, using '/' as the path separator. The parent directory name '..' - is not allowed, and nor is a rooted name (starting with a '/'). - - The function returns a binary string, which is the contents of the - specified resource. 
- - For packages located in the filesystem, which have already been imported, - this is the rough equivalent of - - d = os.path.dirname(sys.modules[package].__file__) - data = open(os.path.join(d, resource), 'rb').read() - - If the package cannot be located or loaded, or it uses a PEP 302 loader - which does not support get_data(), then None is returned. - """ - - loader = get_loader(package) - if loader is None or not hasattr(loader, 'get_data'): - return None - mod = sys.modules.get(package) or loader.load_module(package) - if mod is None or not hasattr(mod, '__file__'): - return None - - # Modify the resource name to be compatible with the loader.get_data - # signature - an os.path format "filename" starting with the dirname of - # the package's __file__ - parts = resource.split('/') - parts.insert(0, os.path.dirname(mod.__file__)) - resource_name = os.path.join(*parts) - return loader.get_data(resource_name) diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_saved_modules.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_saved_modules.py index 3866e752..b2288e50 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_saved_modules.py +++ b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_saved_modules.py @@ -1,8 +1,6 @@ import sys import os -IS_PY2 = sys.version_info < (3,) - def find_in_pythonpath(module_name): # Check all the occurrences where we could match the given module/package in the PYTHONPATH. @@ -92,36 +90,20 @@ with VerifyShadowedImport('select') as verify_shadowed: with VerifyShadowedImport('code') as verify_shadowed: import code as _code; verify_shadowed.check(_code, ['compile_command', 'InteractiveInterpreter']) -if IS_PY2: - with VerifyShadowedImport('thread') as verify_shadowed: - import thread; verify_shadowed.check(thread, ['start_new_thread', 'allocate_lock']) +with VerifyShadowedImport('_thread') as verify_shadowed: + import _thread as thread; verify_shadowed.check(thread, ['start_new_thread', 'start_new', 'allocate_lock']) - with VerifyShadowedImport('Queue') as verify_shadowed: - import Queue as _queue; verify_shadowed.check(_queue, ['Queue', 'LifoQueue', 'Empty', 'Full', 'deque']) +with VerifyShadowedImport('queue') as verify_shadowed: + import queue as _queue; verify_shadowed.check(_queue, ['Queue', 'LifoQueue', 'Empty', 'Full', 'deque']) - with VerifyShadowedImport('xmlrpclib') as verify_shadowed: - import xmlrpclib; verify_shadowed.check(xmlrpclib, ['ServerProxy', 'Marshaller', 'Server']) +with VerifyShadowedImport('xmlrpclib') as verify_shadowed: + import xmlrpc.client as xmlrpclib; verify_shadowed.check(xmlrpclib, ['ServerProxy', 'Marshaller', 'Server']) - with VerifyShadowedImport('SimpleXMLRPCServer') as verify_shadowed: - import SimpleXMLRPCServer as _pydev_SimpleXMLRPCServer; verify_shadowed.check(_pydev_SimpleXMLRPCServer, ['SimpleXMLRPCServer']) +with VerifyShadowedImport('xmlrpc.server') as verify_shadowed: + import xmlrpc.server as xmlrpcserver; verify_shadowed.check(xmlrpcserver, ['SimpleXMLRPCServer']) - with VerifyShadowedImport('BaseHTTPServer') as verify_shadowed: - import BaseHTTPServer; verify_shadowed.check(BaseHTTPServer, ['BaseHTTPRequestHandler']) -else: - with VerifyShadowedImport('_thread') as verify_shadowed: - import _thread as thread; verify_shadowed.check(thread, ['start_new_thread', 'start_new', 'allocate_lock']) - - with VerifyShadowedImport('queue') as verify_shadowed: - import queue as _queue; verify_shadowed.check(_queue, ['Queue', 'LifoQueue', 'Empty', 'Full', 'deque']) - - with 
VerifyShadowedImport('xmlrpclib') as verify_shadowed: - import xmlrpc.client as xmlrpclib; verify_shadowed.check(xmlrpclib, ['ServerProxy', 'Marshaller', 'Server']) - - with VerifyShadowedImport('xmlrpc.server') as verify_shadowed: - import xmlrpc.server as _pydev_SimpleXMLRPCServer; verify_shadowed.check(_pydev_SimpleXMLRPCServer, ['SimpleXMLRPCServer']) - - with VerifyShadowedImport('http.server') as verify_shadowed: - import http.server as BaseHTTPServer; verify_shadowed.check(BaseHTTPServer, ['BaseHTTPRequestHandler']) +with VerifyShadowedImport('http.server') as verify_shadowed: + import http.server as BaseHTTPServer; verify_shadowed.check(BaseHTTPServer, ['BaseHTTPRequestHandler']) # If set, this is a version of the threading.enumerate that doesn't have the patching to remove the pydevd threads. # Note: as it can't be set during execution, don't import the name (import the module and access it through its name). diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_sys_patch.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_sys_patch.py index 0220ad0d..2000d014 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_sys_patch.py +++ b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_sys_patch.py @@ -3,14 +3,17 @@ import sys def patch_sys_module(): + def patched_exc_info(fun): + def pydev_debugger_exc_info(): type, value, traceback = fun() if type == ImportError: - #we should not show frame added by plugin_import call + # we should not show frame added by plugin_import call if traceback and hasattr(traceback, "tb_next"): return type, value, traceback.tb_next return type, value, traceback + return pydev_debugger_exc_info system_exc_info = sys.exc_info @@ -20,19 +23,18 @@ def patch_sys_module(): def patched_reload(orig_reload): + def pydev_debugger_reload(module): orig_reload(module) if module.__name__ == "sys": # if sys module was reloaded we should patch it again patch_sys_module() + return pydev_debugger_reload def patch_reload(): - if sys.version_info[0] >= 3: - import builtins # Py3 - else: - import __builtin__ as builtins + import builtins # Py3 if hasattr(builtins, "reload"): sys.builtin_orig_reload = builtins.reload @@ -56,10 +58,7 @@ def patch_reload(): def cancel_patches_in_sys_module(): sys.exc_info = sys.system_exc_info # @UndefinedVariable - if sys.version_info[0] >= 3: - import builtins # Py3 - else: - import __builtin__ as builtins + import builtins # Py3 if hasattr(sys, "builtin_orig_reload"): builtins.reload = sys.builtin_orig_reload diff --git a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_xmlrpclib.py b/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_xmlrpclib.py deleted file mode 100644 index f2827942..00000000 --- a/src/debugpy/_vendored/pydevd/_pydev_imps/_pydev_xmlrpclib.py +++ /dev/null @@ -1,1493 +0,0 @@ -#Just a copy of the version in python 2.5 to be used if it's not available in jython 2.1 -import sys - -# -# XML-RPC CLIENT LIBRARY -# -# an XML-RPC client interface for Python. -# -# the marshalling and response parser code can also be used to -# implement XML-RPC servers. -# -# Notes: -# this version is designed to work with Python 2.1 or newer. -# -# History: -# 1999-01-14 fl Created -# 1999-01-15 fl Changed dateTime to use localtime -# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service -# 1999-01-19 fl Fixed array data element (from Skip Montanaro) -# 1999-01-21 fl Fixed dateTime constructor, etc. -# 1999-02-02 fl Added fault handling, handle empty sequences, etc. 
-# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro) -# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8) -# 2000-11-28 fl Changed boolean to check the truth value of its argument -# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches -# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1) -# 2001-03-28 fl Make sure response tuple is a singleton -# 2001-03-29 fl Don't require empty params element (from Nicholas Riley) -# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2) -# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod) -# 2001-09-03 fl Allow Transport subclass to override getparser -# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup) -# 2001-10-01 fl Remove containers from memo cache when done with them -# 2001-10-01 fl Use faster escape method (80% dumps speedup) -# 2001-10-02 fl More dumps microtuning -# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum) -# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow -# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems) -# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix) -# 2002-03-17 fl Avoid buffered read when possible (from James Rucker) -# 2002-04-07 fl Added pythondoc comments -# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers -# 2002-05-15 fl Added error constants (from Andrew Kuchling) -# 2002-06-27 fl Merged with Python CVS version -# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby) -# 2003-01-22 sm Add support for the bool type -# 2003-02-27 gvr Remove apply calls -# 2003-04-24 sm Use cStringIO if available -# 2003-04-25 ak Add support for nil -# 2003-06-15 gn Add support for time.struct_time -# 2003-07-12 gp Correct marshalling of Faults -# 2003-10-31 mvl Add multicall support -# 2004-08-20 mvl Bump minimum supported Python version to 2.1 -# -# Copyright (c) 1999-2002 by Secret Labs AB. -# Copyright (c) 1999-2002 by Fredrik Lundh. -# -# info@pythonware.com -# http://www.pythonware.com -# -# -------------------------------------------------------------------- -# The XML-RPC client interface is -# -# Copyright (c) 1999-2002 by Secret Labs AB -# Copyright (c) 1999-2002 by Fredrik Lundh -# -# By obtaining, using, and/or copying this software and/or its -# associated documentation, you agree that you have read, understood, -# and will comply with the following terms and conditions: -# -# Permission to use, copy, modify, and distribute this software and -# its associated documentation for any purpose and without fee is -# hereby granted, provided that the above copyright notice appears in -# all copies, and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Secret Labs AB or the author not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD -# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- -# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR -# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. 
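For reference: this vendored Python 2.5 xmlrpclib copy is unnecessary on Python 3 because the stdlib modules were simply renamed; _pydev_saved_modules.py (modified earlier in this patch) now imports the new names directly. A minimal sketch of the mapping, assuming only the standard library (illustrative only, not part of the patch):

    import _thread as thread            # Python 2: import thread
    import queue                        # Python 2: import Queue
    import xmlrpc.client as xmlrpclib   # Python 2: import xmlrpclib
    import xmlrpc.server                # Python 2: import SimpleXMLRPCServer
    import http.server                  # Python 2: import BaseHTTPServer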
-# --------------------------------------------------------------------
-
-#
-# things to look into some day:
-
-# TODO: sort out True/False/boolean issues for Python 2.3
-
-"""
-An XML-RPC client interface for Python.
-
-The marshalling and response parser code can also be used to
-implement XML-RPC servers.
-
-Exported exceptions:
-
-  Error          Base class for client errors
-  ProtocolError  Indicates an HTTP protocol error
-  ResponseError  Indicates a broken response package
-  Fault          Indicates an XML-RPC fault package
-
-Exported classes:
-
-  ServerProxy    Represents a logical connection to an XML-RPC server
-
-  MultiCall      Executor of boxcared xmlrpc requests
-  Boolean        boolean wrapper to generate a "boolean" XML-RPC value
-  DateTime       dateTime wrapper for an ISO 8601 string or time tuple or
-                 localtime integer value to generate a "dateTime.iso8601"
-                 XML-RPC value
-  Binary         binary data wrapper
-
-  SlowParser     Slow but safe standard parser (based on xmllib)
-  Marshaller     Generate an XML-RPC params chunk from a Python data structure
-  Unmarshaller   Unmarshal an XML-RPC response from incoming XML event message
-  Transport      Handles an HTTP transaction to an XML-RPC server
-  SafeTransport  Handles an HTTPS transaction to an XML-RPC server
-
-Exported constants:
-
-  True
-  False
-
-Exported functions:
-
-  boolean        Convert any Python value to an XML-RPC boolean
-  getparser      Create instance of the fastest available parser & attach
-                 to an unmarshalling object
-  dumps          Convert an argument tuple or a Fault instance to an XML-RPC
-                 request (or response, if the methodresponse option is used).
-  loads          Convert an XML-RPC packet to unmarshalled data plus a method
-                 name (None if not present).
-"""
-
-import re, string, time, operator
-
-from types import *
-
-# --------------------------------------------------------------------
-# Internal stuff
-
-try:
-    unicode
-except NameError:
-    unicode = None # unicode support not available
-
-try:
-    import datetime
-except ImportError:
-    datetime = None
-
-try:
-    _bool_is_builtin = False.__class__.__name__ == "bool"
-except (NameError, AttributeError):
-    _bool_is_builtin = 0
-
-def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search):
-    # decode non-ascii string (if possible)
-    if unicode and encoding and is8bit(data):
-        data = unicode(data, encoding)
-    return data
-
-def escape(s, replace=string.replace):
-    s = replace(s, "&", "&amp;")
-    s = replace(s, "<", "&lt;")
-    return replace(s, ">", "&gt;",)
-
-if unicode:
-    def _stringify(string):
-        # convert to 7-bit ascii if possible
-        try:
-            return string.encode("ascii")
-        except UnicodeError:
-            return string
-else:
-    def _stringify(string):
-        return string
-
-__version__ = "1.0.1"
-
-# xmlrpc integer limits
-try:
-    long
-except NameError:
-    long = int
-MAXINT = long(2) ** 31 - 1
-MININT = long(-2) ** 31
-
-# --------------------------------------------------------------------
-# Error constants (from Dan Libby's specification at
-# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)
-
-# Ranges of errors
-PARSE_ERROR = -32700
-SERVER_ERROR = -32600
-APPLICATION_ERROR = -32500
-SYSTEM_ERROR = -32400
-TRANSPORT_ERROR = -32300
-
-# Specific errors
-NOT_WELLFORMED_ERROR = -32700
-UNSUPPORTED_ENCODING = -32701
-INVALID_ENCODING_CHAR = -32702
-INVALID_XMLRPC = -32600
-METHOD_NOT_FOUND = -32601
-INVALID_METHOD_PARAMS = -32602
-INTERNAL_ERROR = -32603
-
-# --------------------------------------------------------------------
-# Exceptions
-
-##
-# Base class for all kinds of client-side errors.
-
-class Error(Exception):
-    """Base class for client errors."""
-    def __str__(self):
-        return repr(self)
-
-##
-# Indicates an HTTP-level protocol error. This is raised by the HTTP
-# transport layer, if the server returns an error code other than 200
-# (OK).
-#
-# @param url The target URL.
-# @param errcode The HTTP error code.
-# @param errmsg The HTTP error message.
-# @param headers The HTTP header dictionary.
-
-class ProtocolError(Error):
-    """Indicates an HTTP protocol error."""
-    def __init__(self, url, errcode, errmsg, headers):
-        Error.__init__(self)
-        self.url = url
-        self.errcode = errcode
-        self.errmsg = errmsg
-        self.headers = headers
-    def __repr__(self):
-        return (
-            "<ProtocolError for %s: %s %s>" %
-            (self.url, self.errcode, self.errmsg)
-            )
-
-##
-# Indicates a broken XML-RPC response package. This exception is
-# raised by the unmarshalling layer, if the XML-RPC response is
-# malformed.
-
-class ResponseError(Error):
-    """Indicates a broken response package."""
-    pass
-
-##
-# Indicates an XML-RPC fault response package. This exception is
-# raised by the unmarshalling layer, if the XML-RPC response contains
-# a fault string. This exception can also used as a class, to
-# generate a fault XML-RPC message.
-#
-# @param faultCode The XML-RPC fault code.
-# @param faultString The XML-RPC fault string.
-
-class Fault(Error):
-    """Indicates an XML-RPC fault package."""
-    def __init__(self, faultCode, faultString, **extra):
-        Error.__init__(self)
-        self.faultCode = faultCode
-        self.faultString = faultString
-    def __repr__(self):
-        return (
-            "<Fault %s: %s>" %
-            (self.faultCode, repr(self.faultString))
-            )
-
-# --------------------------------------------------------------------
-# Special values
-
-##
-# Wrapper for XML-RPC boolean values. Use the xmlrpclib.True and
-# xmlrpclib.False constants, or the xmlrpclib.boolean() function, to
-# generate boolean XML-RPC values.
-#
-# @param value A boolean value. Any true value is interpreted as True,
-#     all other values are interpreted as False.
-
-if _bool_is_builtin:
-    boolean = Boolean = bool #@UndefinedVariable
-    # to avoid breaking code which references xmlrpclib.{True,False}
-    True, False = True, False
-else:
-    class Boolean:
-        """Boolean-value wrapper.
-
-        Use True or False to generate a "boolean" XML-RPC value.
-        """
-
-        def __init__(self, value=0):
-            self.value = operator.truth(value)
-
-        def encode(self, out):
-            out.write("<value><boolean>%d</boolean></value>\n" % self.value)
-
-        def __cmp__(self, other):
-            if isinstance(other, Boolean):
-                other = other.value
-            return cmp(self.value, other)
-
-        def __repr__(self):
-            if self.value:
-                return "<Boolean True at %x>" % id(self)
-            else:
-                return "<Boolean False at %x>" % id(self)
-
-        def __int__(self):
-            return self.value
-
-        def __nonzero__(self):
-            return self.value
-
-    True, False = Boolean(1), Boolean(0)
-
-    ##
-    # Map true or false value to XML-RPC boolean values.
-    #
-    # @def boolean(value)
-    # @param value A boolean value. Any true value is mapped to True,
-    #     all other values are mapped to False.
-    # @return xmlrpclib.True or xmlrpclib.False.
-    # @see Boolean
-    # @see True
-    # @see False
-
-    def boolean(value, _truefalse=(False, True)):
-        """Convert any Python value to XML-RPC 'boolean'."""
-        return _truefalse[operator.truth(value)]
-
-##
-# Wrapper for XML-RPC DateTime values. This converts a time value to
-# the format used by XML-RPC.
-# <p>

-# The value can be given as a string in the format
-# "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
-# time.localtime()), or an integer value (as returned by time.time()).
-# The wrapper uses time.localtime() to convert an integer to a time
-# tuple.
-#
-# @param value The time, given as an ISO 8601 string, a time
-#              tuple, or a integer time value.
-
-class DateTime:
-    """DateTime wrapper for an ISO 8601 string or time tuple or
-    localtime integer value to generate 'dateTime.iso8601' XML-RPC
-    value.
-    """
-
-    def __init__(self, value=0):
-        if not isinstance(value, StringType):
-            if datetime and isinstance(value, datetime.datetime):
-                self.value = value.strftime("%Y%m%dT%H:%M:%S")
-                return
-            if datetime and isinstance(value, datetime.date):
-                self.value = value.strftime("%Y%m%dT%H:%M:%S")
-                return
-            if datetime and isinstance(value, datetime.time):
-                today = datetime.datetime.now().strftime("%Y%m%d")
-                self.value = value.strftime(today + "T%H:%M:%S")
-                return
-            if not isinstance(value, (TupleType, time.struct_time)): #@UndefinedVariable
-                if value == 0:
-                    value = time.time()
-                value = time.localtime(value)
-            value = time.strftime("%Y%m%dT%H:%M:%S", value)
-        self.value = value
-
-    def __cmp__(self, other):
-        if isinstance(other, DateTime):
-            other = other.value
-        return cmp(self.value, other)
-
-    ##
-    # Get date/time value.
-    #
-    # @return Date/time value, as an ISO 8601 string.
-
-    def __str__(self):
-        return self.value
-
-    def __repr__(self):
-        return "<DateTime %s at %x>" % (repr(self.value), id(self))
-
-    def decode(self, data):
-        data = str(data)
-        self.value = string.strip(data)
-
-    def encode(self, out):
-        out.write("<value><dateTime.iso8601>")
-        out.write(self.value)
-        out.write("</dateTime.iso8601></value>\n")
-
-def _datetime(data):
-    # decode xml element contents into a DateTime structure.
-    value = DateTime()
-    value.decode(data)
-    return value
-
-def _datetime_type(data):
-    t = time.strptime(data, "%Y%m%dT%H:%M:%S") #@UndefinedVariable
-    return datetime.datetime(*tuple(t)[:6])
-
-##
-# Wrapper for binary data. This can be used to transport any kind
-# of binary data over XML-RPC, using BASE64 encoding.
-#
-# @param data An 8-bit string containing arbitrary data.
-
-import base64
-try:
-    import cStringIO as StringIO
-except ImportError:
-    import StringIO
-
-class Binary:
-    """Wrapper for binary data."""
-
-    def __init__(self, data=None):
-        self.data = data
-
-    ##
-    # Get buffer contents.
-    #
-    # @return Buffer contents, as an 8-bit string.
-
-    def __str__(self):
-        return self.data or ""
-
-    def __cmp__(self, other):
-        if isinstance(other, Binary):
-            other = other.data
-        return cmp(self.data, other)
-
-    def decode(self, data):
-        self.data = base64.decodestring(data)
-
-    def encode(self, out):
-        out.write("<value><base64>\n")
-        base64.encode(StringIO.StringIO(self.data), out)
-        out.write("</base64></value>\n")
-
-def _binary(data):
-    # decode xml element contents into a Binary structure
-    value = Binary()
-    value.decode(data)
-    return value
-
-WRAPPERS = (DateTime, Binary)
-if not _bool_is_builtin:
-    WRAPPERS = WRAPPERS + (Boolean,)
-
-# --------------------------------------------------------------------
-# XML parsers
-
-try:
-    # optional xmlrpclib accelerator
-    import _xmlrpclib #@UnresolvedImport
-    FastParser = _xmlrpclib.Parser
-    FastUnmarshaller = _xmlrpclib.Unmarshaller
-except (AttributeError, ImportError):
-    FastParser = FastUnmarshaller = None
-
-try:
-    import _xmlrpclib #@UnresolvedImport
-    FastMarshaller = _xmlrpclib.Marshaller
-except (AttributeError, ImportError):
-    FastMarshaller = None
-
-#
-# the SGMLOP parser is about 15x faster than Python's builtin
-# XML parser. SGMLOP sources can be downloaded from:
-#
-#     http://www.pythonware.com/products/xml/sgmlop.htm
-#
-
-try:
-    import sgmlop
-    if not hasattr(sgmlop, "XMLParser"):
-        raise ImportError()
-except ImportError:
-    SgmlopParser = None # sgmlop accelerator not available
-else:
-    class SgmlopParser:
-        def __init__(self, target):
-
-            # setup callbacks
-            self.finish_starttag = target.start
-            self.finish_endtag = target.end
-            self.handle_data = target.data
-            self.handle_xml = target.xml
-
-            # activate parser
-            self.parser = sgmlop.XMLParser()
-            self.parser.register(self)
-            self.feed = self.parser.feed
-            self.entity = {
-                "amp": "&", "gt": ">", "lt": "<",
-                "apos": "'", "quot": '"'
-                }
-
-        def close(self):
-            try:
-                self.parser.close()
-            finally:
-                self.parser = self.feed = None # remove circular reference
-
-        def handle_proc(self, tag, attr):
-            m = re.search("encoding\s*=\s*['\"]([^\"']+)[\"']", attr) #@UndefinedVariable
-            if m:
-                self.handle_xml(m.group(1), 1)
-
-        def handle_entityref(self, entity):
-            # entity
-            try:
-                self.handle_data(self.entity[entity])
-            except KeyError:
-                self.handle_data("&%s;" % entity)
-
-try:
-    from xml.parsers import expat
-    if not hasattr(expat, "ParserCreate"):
-        raise ImportError()
-except ImportError:
-    ExpatParser = None # expat not available
-else:
-    class ExpatParser:
-        # fast expat parser for Python 2.0 and later. this is about
-        # 50% slower than sgmlop, on roundtrip testing
-        def __init__(self, target):
-            self._parser = parser = expat.ParserCreate(None, None)
-            self._target = target
-            parser.StartElementHandler = target.start
-            parser.EndElementHandler = target.end
-            parser.CharacterDataHandler = target.data
-            encoding = None
-            if not parser.returns_unicode:
-                encoding = "utf-8"
-            target.xml(encoding, None)
-
-        def feed(self, data):
-            self._parser.Parse(data, 0)
-
-        def close(self):
-            self._parser.Parse("", 1) # end of data
-            del self._target, self._parser # get rid of circular references
-
-class SlowParser:
-    """Default XML parser (based on xmllib.XMLParser)."""
-    # this is about 10 times slower than sgmlop, on roundtrip
-    # testing.
-    def __init__(self, target):
-        import xmllib # lazy subclassing (!)
-        if xmllib.XMLParser not in SlowParser.__bases__:
-            SlowParser.__bases__ = (xmllib.XMLParser,)
-        self.handle_xml = target.xml
-        self.unknown_starttag = target.start
-        self.handle_data = target.data
-        self.handle_cdata = target.data
-        self.unknown_endtag = target.end
-        try:
-            xmllib.XMLParser.__init__(self, accept_utf8=1)
-        except TypeError:
-            xmllib.XMLParser.__init__(self) # pre-2.0
-
-# --------------------------------------------------------------------
-# XML-RPC marshalling and unmarshalling code
-
-##
-# XML-RPC marshaller.
-#
-# @param encoding Default encoding for 8-bit strings. The default
-#     value is None (interpreted as UTF-8).
-# @see dumps
-
-class Marshaller:
-    """Generate an XML-RPC params chunk from a Python data structure.
-
-    Create a Marshaller instance for each set of parameters, and use
-    the "dumps" method to convert your data (represented as a tuple)
-    to an XML-RPC params chunk. To write a fault response, pass a
-    Fault instance instead. You may prefer to use the "dumps" module
-    function for this purpose.
-    """
-
-    # by the way, if you don't understand what's going on in here,
-    # that's perfectly ok.
-
-    def __init__(self, encoding=None, allow_none=0):
-        self.memo = {}
-        self.data = None
-        self.encoding = encoding
-        self.allow_none = allow_none
-
-    dispatch = {}
-
-    def dumps(self, values):
-        out = []
-        write = out.append
-        dump = self.__dump
-        if isinstance(values, Fault):
-            # fault instance
-            write("<fault>\n")
-            dump({'faultCode': values.faultCode,
-                  'faultString': values.faultString},
-                 write)
-            write("</fault>\n")
-        else:
-            # parameter block
-            # FIXME: the xml-rpc specification allows us to leave out
-            # the entire <params> block if there are no parameters.
-            # however, changing this may break older code (including
-            # old versions of xmlrpclib.py), so this is better left as
-            # is for now. See @XMLRPC3 for more information. /F
-            write("<params>\n")
-            for v in values:
-                write("<param>\n")
-                dump(v, write)
-                write("</param>\n")
-            write("</params>\n")
-        result = string.join(out, "")
-        return result
-
-    def __dump(self, value, write):
-        try:
-            f = self.dispatch[type(value)]
-        except KeyError:
-            raise TypeError("cannot marshal %s objects" % type(value))
-        else:
-            f(self, value, write)
-
-    def dump_nil (self, value, write):
-        if not self.allow_none:
-            raise TypeError("cannot marshal None unless allow_none is enabled")
-        write("<value><nil/></value>")
-    dispatch[NoneType] = dump_nil
-
-    def dump_int(self, value, write):
-        # in case ints are > 32 bits
-        if value > MAXINT or value < MININT:
-            raise OverflowError("int exceeds XML-RPC limits")
-        write("<value><int>")
-        write(str(value))
-        write("</int></value>\n")
-    dispatch[IntType] = dump_int
-
-    if _bool_is_builtin:
-        def dump_bool(self, value, write):
-            write("<value><boolean>")
-            write(value and "1" or "0")
-            write("</boolean></value>\n")
-        dispatch[bool] = dump_bool #@UndefinedVariable
-
-    def dump_long(self, value, write):
-        if value > MAXINT or value < MININT:
-            raise OverflowError("long int exceeds XML-RPC limits")
-        write("<value><int>")
-        write(str(int(value)))
-        write("</int></value>\n")
-    dispatch[LongType] = dump_long
-
-    def dump_double(self, value, write):
-        write("<value><double>")
-        write(repr(value))
-        write("</double></value>\n")
-    dispatch[FloatType] = dump_double
-
-    def dump_string(self, value, write, escape=escape):
-        write("<value><string>")
-        write(escape(value))
-        write("</string></value>\n")
-    dispatch[StringType] = dump_string
-
-    if unicode:
-        def dump_unicode(self, value, write, escape=escape):
-            value = value.encode(self.encoding)
-            write("<value><string>")
-            write(escape(value))
-            write("</string></value>\n")
-        dispatch[UnicodeType] = dump_unicode
-
-    def dump_array(self, value, write):
-        i = id(value)
-        if self.memo.has_key(i):
-            raise TypeError("cannot marshal recursive sequences")
-        self.memo[i] = None
-        dump = self.__dump
-        write("<value><array><data>\n")
-        for v in value:
-            dump(v, write)
-        write("</data></array></value>\n")
-        del self.memo[i]
-    dispatch[TupleType] = dump_array
-    dispatch[ListType] = dump_array
-
-    def dump_struct(self, value, write, escape=escape):
-        i = id(value)
-        if self.memo.has_key(i):
-            raise TypeError("cannot marshal recursive dictionaries")
-        self.memo[i] = None
-        dump = self.__dump
-        write("<value><struct>\n")
-        for k, v in value.items():
-            write("<member>\n")
-            if type(k) is not StringType:
-                if unicode and type(k) is UnicodeType:
-                    k = k.encode(self.encoding)
-                else:
-                    raise TypeError("dictionary key must be string")
-            write("<name>%s</name>\n" % escape(k))
-            dump(v, write)
-            write("</member>\n")
-        write("</struct></value>\n")
-        del self.memo[i]
-    dispatch[DictType] = dump_struct
-
-    if datetime:
-        def dump_datetime(self, value, write):
-            write("<value><dateTime.iso8601>")
-            write(value.strftime("%Y%m%dT%H:%M:%S"))
-            write("</dateTime.iso8601></value>\n")
-        dispatch[datetime.datetime] = dump_datetime
-
-        def dump_date(self, value, write):
-            write("<value><dateTime.iso8601>")
-            write(value.strftime("%Y%m%dT00:00:00"))
-            write("</dateTime.iso8601></value>\n")
-        dispatch[datetime.date] = dump_date
-
-        def dump_time(self, value, write):
-            write("<value><dateTime.iso8601>")
-            write(datetime.datetime.now().date().strftime("%Y%m%dT"))
-            write(value.strftime("%H:%M:%S"))
-            write("</dateTime.iso8601></value>\n")
-        dispatch[datetime.time] = dump_time
-
-    def dump_instance(self, value, write):
-        # check for special wrappers
-        if value.__class__ in WRAPPERS:
-            self.write = write
-            value.encode(self)
-            del self.write
-        else:
-            # store instance attributes as a struct (really?)
-            self.dump_struct(value.__dict__, write)
-    dispatch[InstanceType] = dump_instance
-
-##
-# XML-RPC unmarshaller.
-#
-# @see loads
-
-class Unmarshaller:
-    """Unmarshal an XML-RPC response, based on incoming XML event
-    messages (start, data, end). Call close() to get the resulting
-    data structure.
-
-    Note that this reader is fairly tolerant, and gladly accepts bogus
-    XML-RPC data without complaining (but not bogus XML).
-    """
-
-    # and again, if you don't understand what's going on in here,
-    # that's perfectly ok.
-
-    def __init__(self, use_datetime=0):
-        self._type = None
-        self._stack = []
-        self._marks = []
-        self._data = []
-        self._methodname = None
-        self._encoding = "utf-8"
-        self.append = self._stack.append
-        self._use_datetime = use_datetime
-        if use_datetime and not datetime:
-            raise ValueError("the datetime module is not available")
-
-    def close(self):
-        # return response tuple and target method
-        if self._type is None or self._marks:
-            raise ResponseError()
-        if self._type == "fault":
-            raise Fault(**self._stack[0])
-        return tuple(self._stack)
-
-    def getmethodname(self):
-        return self._methodname
-
-    #
-    # event handlers
-
-    def xml(self, encoding, standalone):
-        self._encoding = encoding
-        # FIXME: assert standalone == 1 ???
-
-    def start(self, tag, attrs):
-        # prepare to handle this element
-        if tag == "array" or tag == "struct":
-            self._marks.append(len(self._stack))
-        self._data = []
-        self._value = (tag == "value")
-
-    def data(self, text):
-        self._data.append(text)
-
-    def end(self, tag, join=string.join):
-        # call the appropriate end tag handler
-        try:
-            f = self.dispatch[tag]
-        except KeyError:
-            pass  # unknown tag ?
-        else:
-            return f(self, join(self._data, ""))
-
-    #
-    # accelerator support
-
-    def end_dispatch(self, tag, data):
-        # dispatch data
-        try:
-            f = self.dispatch[tag]
-        except KeyError:
-            pass  # unknown tag ?
-        else:
-            return f(self, data)
-
-    #
-    # element decoders
-
-    dispatch = {}
-
-    def end_nil (self, data):
-        self.append(None)
-        self._value = 0
-    dispatch["nil"] = end_nil
-
-    def end_boolean(self, data):
-        if data == "0":
-            self.append(False)
-        elif data == "1":
-            self.append(True)
-        else:
-            raise TypeError("bad boolean value")
-        self._value = 0
-    dispatch["boolean"] = end_boolean
-
-    def end_int(self, data):
-        self.append(int(data))
-        self._value = 0
-    dispatch["i4"] = end_int
-    dispatch["int"] = end_int
-
-    def end_double(self, data):
-        self.append(float(data))
-        self._value = 0
-    dispatch["double"] = end_double
-
-    def end_string(self, data):
-        if self._encoding:
-            data = _decode(data, self._encoding)
-        self.append(_stringify(data))
-        self._value = 0
-    dispatch["string"] = end_string
-    dispatch["name"] = end_string  # struct keys are always strings
-
-    def end_array(self, data):
-        mark = self._marks.pop()
-        # map arrays to Python lists
-        self._stack[mark:] = [self._stack[mark:]]
-        self._value = 0
-    dispatch["array"] = end_array
-
-    def end_struct(self, data):
-        mark = self._marks.pop()
-        # map structs to Python dictionaries
-        dict = {}
-        items = self._stack[mark:]
-        for i in range(0, len(items), 2):
-            dict[_stringify(items[i])] = items[i + 1]
-        self._stack[mark:] = [dict]
-        self._value = 0
-    dispatch["struct"] = end_struct
-
-    def end_base64(self, data):
-        value = Binary()
-        value.decode(data)
-        self.append(value)
-        self._value = 0
-    dispatch["base64"] = end_base64
-
-    def end_dateTime(self, data):
-        value = DateTime()
-        value.decode(data)
-        if self._use_datetime:
-            value = _datetime_type(data)
-        self.append(value)
-    dispatch["dateTime.iso8601"] = end_dateTime
-
-    def end_value(self, data):
-        # if we stumble upon a value element with no internal
-        # elements, treat it as a string element
-        if self._value:
-            self.end_string(data)
-    dispatch["value"] = end_value
-
-    def end_params(self, data):
-        self._type = "params"
-    dispatch["params"] = end_params
-
-    def end_fault(self, data):
-        self._type = "fault"
-    dispatch["fault"] = end_fault
-
-    def end_methodName(self, data):
-        if self._encoding:
-            data = _decode(data, self._encoding)
-        self._methodname = data
-        self._type = "methodName"  # no params
-    dispatch["methodName"] = end_methodName
-
-## Multicall support
-#
-
-class _MultiCallMethod:
-    # some lesser magic to store calls made to a MultiCall object
-    # for batch execution
-    def __init__(self, call_list, name):
-        self.__call_list = call_list
-        self.__name = name
-    def __getattr__(self, name):
-        return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
-    def __call__(self, *args):
-        self.__call_list.append((self.__name, args))
-
-class MultiCallIterator:
-    """Iterates over the results of a multicall. Exceptions are
-    thrown in response to xmlrpc faults."""
-
-    def __init__(self, results):
-        self.results = results
-
-    def __getitem__(self, i):
-        item = self.results[i]
-        if type(item) == type({}):
-            raise Fault(item['faultCode'], item['faultString'])
-        elif type(item) == type([]):
-            return item[0]
-        else:
-            raise ValueError("unexpected type in multicall result")
-
-class MultiCall:
-    """server -> a object used to boxcar method calls
-
-    server should be a ServerProxy object.
-
-    Methods can be added to the MultiCall using normal
-    method call syntax e.g.:
-
-    multicall = MultiCall(server_proxy)
-    multicall.add(2,3)
-    multicall.get_address("Guido")
-
-    To execute the multicall, call the MultiCall object e.g.:
-
-    add_result, address = multicall()
-    """
-
-    def __init__(self, server):
-        self.__server = server
-        self.__call_list = []
-
-    def __repr__(self):
-        return "<MultiCall at %x>" % id(self)
-
-    __str__ = __repr__
-
-    def __getattr__(self, name):
-        return _MultiCallMethod(self.__call_list, name)
-
-    def __call__(self):
-        marshalled_list = []
-        for name, args in self.__call_list:
-            marshalled_list.append({'methodName' : name, 'params' : args})
-
-        return MultiCallIterator(self.__server.system.multicall(marshalled_list))
-
-# --------------------------------------------------------------------
-# convenience functions
-
-##
-# Create a parser object, and connect it to an unmarshalling instance.
-# This function picks the fastest available XML parser.
-#
-# return A (parser, unmarshaller) tuple.
-
-def getparser(use_datetime=0):
-    """getparser() -> parser, unmarshaller
-
-    Create an instance of the fastest available parser, and attach it
-    to an unmarshalling object.  Return both objects.
-    """
-    if use_datetime and not datetime:
-        raise ValueError("the datetime module is not available")
-    if FastParser and FastUnmarshaller:
-        if use_datetime:
-            mkdatetime = _datetime_type
-        else:
-            mkdatetime = _datetime
-        target = FastUnmarshaller(True, False, _binary, mkdatetime, Fault)
-        parser = FastParser(target)
-    else:
-        target = Unmarshaller(use_datetime=use_datetime)
-        if FastParser:
-            parser = FastParser(target)
-        elif SgmlopParser:
-            parser = SgmlopParser(target)
-        elif ExpatParser:
-            parser = ExpatParser(target)
-        else:
-            parser = SlowParser(target)
-    return parser, target
-
-##
-# Convert a Python tuple or a Fault instance to an XML-RPC packet.
-#
-# @def dumps(params, **options)
-# @param params A tuple or Fault instance.
-# @keyparam methodname If given, create a methodCall request for
-#     this method name.
-# @keyparam methodresponse If given, create a methodResponse packet.
-#     If used with a tuple, the tuple must be a singleton (that is,
-#     it must contain exactly one element).
-# @keyparam encoding The packet encoding.
-# @return A string containing marshalled data.
-
-def dumps(params, methodname=None, methodresponse=None, encoding=None,
-          allow_none=0):
-    """data [,options] -> marshalled data
-
-    Convert an argument tuple or a Fault instance to an XML-RPC
-    request (or response, if the methodresponse option is used).
-
-    In addition to the data object, the following options can be given
-    as keyword arguments:
-
-        methodname: the method name for a methodCall packet
-
-        methodresponse: true to create a methodResponse packet.
-        If this option is used with a tuple, the tuple must be
-        a singleton (i.e. it can contain only one element).
-
-        encoding: the packet encoding (default is UTF-8)
-
-    All 8-bit strings in the data structure are assumed to use the
-    packet encoding.  Unicode strings are automatically converted,
-    where necessary.
-    """
-
-    assert isinstance(params, TupleType) or isinstance(params, Fault), \
-           "argument must be tuple or Fault instance"
-
-    if isinstance(params, Fault):
-        methodresponse = 1
-    elif methodresponse and isinstance(params, TupleType):
-        assert len(params) == 1, "response tuple must be a singleton"
-
-    if not encoding:
-        encoding = "utf-8"
-
-    if FastMarshaller:
-        m = FastMarshaller(encoding)
-    else:
-        m = Marshaller(encoding, allow_none)
-
-    data = m.dumps(params)
-
-    if encoding != "utf-8":
-        xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
-    else:
-        xmlheader = "<?xml version='1.0'?>\n"  # utf-8 is default
-
-    # standard XML-RPC wrappings
-    if methodname:
-        # a method call
-        if not isinstance(methodname, StringType):
-            methodname = methodname.encode(encoding)
-        data = (
-            xmlheader,
-            "<methodCall>\n"
-            "<methodName>", methodname, "</methodName>\n",
-            data,
-            "</methodCall>\n"
-            )
-    elif methodresponse:
-        # a method response, or a fault structure
-        data = (
-            xmlheader,
-            "<methodResponse>\n",
-            data,
-            "</methodResponse>\n"
-            )
-    else:
-        return data  # return as is
-    return string.join(data, "")
-
-##
-# Convert an XML-RPC packet to a Python object.  If the XML-RPC packet
-# represents a fault condition, this function raises a Fault exception.
-#
-# @param data An XML-RPC packet, given as an 8-bit string.
-# @return A tuple containing the unpacked data, and the method name
-#     (None if not present).
-# @see Fault
-
-def loads(data, use_datetime=0):
-    """data -> unmarshalled data, method name
-
-    Convert an XML-RPC packet to unmarshalled data plus a method
-    name (None if not present).
-
-    If the XML-RPC packet represents a fault condition, this function
-    raises a Fault exception.
-    """
-    p, u = getparser(use_datetime=use_datetime)
-    p.feed(data)
-    p.close()
-    return u.close(), u.getmethodname()
-
-
-# --------------------------------------------------------------------
-# request dispatcher
-
-class _Method:
-    # some magic to bind an XML-RPC method to an RPC server.
-    # supports "nested" methods (e.g. examples.getStateName)
-    def __init__(self, send, name):
-        self.__send = send
-        self.__name = name
-    def __getattr__(self, name):
-        return _Method(self.__send, "%s.%s" % (self.__name, name))
-    def __call__(self, *args):
-        return self.__send(self.__name, args)
-
-##
-# Standard transport class for XML-RPC over HTTP.
-# <p>
-# You can create custom transports by subclassing this method, and
-# overriding selected methods.
-
-class Transport:
-    """Handles an HTTP transaction to an XML-RPC server."""
-
-    # client identifier (may be overridden)
-    user_agent = "xmlrpclib.py/%s (by www.pythonware.com)" % __version__
-
-    def __init__(self, use_datetime=0):
-        self._use_datetime = use_datetime
-
-    ##
-    # Send a complete request, and parse the response.
-    #
-    # @param host Target host.
-    # @param handler Target PRC handler.
-    # @param request_body XML-RPC request body.
-    # @param verbose Debugging flag.
-    # @return Parsed response.
-
-    def request(self, host, handler, request_body, verbose=0):
-        # issue XML-RPC request
-
-        h = self.make_connection(host)
-        if verbose:
-            h.set_debuglevel(1)
-
-        self.send_request(h, handler, request_body)
-        self.send_host(h, host)
-        self.send_user_agent(h)
-        self.send_content(h, request_body)
-
-        errcode, errmsg, headers = h.getreply()
-
-        if errcode != 200:
-            raise ProtocolError(
-                host + handler,
-                errcode, errmsg,
-                headers
-                )
-
-        self.verbose = verbose
-
-        try:
-            sock = h._conn.sock
-        except AttributeError:
-            sock = None
-
-        return self._parse_response(h.getfile(), sock)
-
-    ##
-    # Create parser.
-    #
-    # @return A 2-tuple containing a parser and a unmarshaller.
-
-    def getparser(self):
-        # get parser and unmarshaller
-        return getparser(use_datetime=self._use_datetime)
-
-    ##
-    # Get authorization info from host parameter
-    # Host may be a string, or a (host, x509-dict) tuple; if a string,
-    # it is checked for a "user:pw@host" format, and a "Basic
-    # Authentication" header is added if appropriate.
-    #
-    # @param host Host descriptor (URL or (URL, x509 info) tuple).
-    # @return A 3-tuple containing (actual host, extra headers,
-    #     x509 info).  The header and x509 fields may be None.
-
-    def get_host_info(self, host):
-
-        x509 = {}
-        if isinstance(host, TupleType):
-            host, x509 = host
-
-        import urllib
-        auth, host = urllib.splituser(host)
-
-        if auth:
-            import base64
-            auth = base64.encodestring(urllib.unquote(auth))
-            auth = string.join(string.split(auth), "")  # get rid of whitespace
-            extra_headers = [
-                ("Authorization", "Basic " + auth)
-                ]
-        else:
-            extra_headers = None
-
-        return host, extra_headers, x509
-
-    ##
-    # Connect to server.
-    #
-    # @param host Target host.
-    # @return A connection handle.
-
-    def make_connection(self, host):
-        # create a HTTP connection object from a host descriptor
-        import httplib
-        host, extra_headers, x509 = self.get_host_info(host)
-        return httplib.HTTP(host)
-
-    ##
-    # Send request header.
-    #
-    # @param connection Connection handle.
-    # @param handler Target RPC handler.
-    # @param request_body XML-RPC body.
-
-    def send_request(self, connection, handler, request_body):
-        connection.putrequest("POST", handler)
-
-    ##
-    # Send host name.
-    #
-    # @param connection Connection handle.
-    # @param host Host name.
-
-    def send_host(self, connection, host):
-        host, extra_headers, x509 = self.get_host_info(host)
-        connection.putheader("Host", host)
-        if extra_headers:
-            if isinstance(extra_headers, DictType):
-                extra_headers = extra_headers.items()
-            for key, value in extra_headers:
-                connection.putheader(key, value)
-
-    ##
-    # Send user-agent identifier.
-    #
-    # @param connection Connection handle.
-
-    def send_user_agent(self, connection):
-        connection.putheader("User-Agent", self.user_agent)
-
-    ##
-    # Send request body.
-    #
-    # @param connection Connection handle.
-    # @param request_body XML-RPC request body.
-
-    def send_content(self, connection, request_body):
-        connection.putheader("Content-Type", "text/xml")
-        connection.putheader("Content-Length", str(len(request_body)))
-        connection.endheaders()
-        if request_body:
-            connection.send(request_body)
-
-    ##
-    # Parse response.
-    #
-    # @param file Stream.
-    # @return Response tuple and target method.
-
-    def parse_response(self, file):
-        # compatibility interface
-        return self._parse_response(file, None)
-
-    ##
-    # Parse response (alternate interface).  This is similar to the
-    # parse_response method, but also provides direct access to the
-    # underlying socket object (where available).
-    #
-    # @param file Stream.
-    # @param sock Socket handle (or None, if the socket object
-    #     could not be accessed).
-    # @return Response tuple and target method.
-
-    def _parse_response(self, file, sock):
-        # read response from input file/socket, and parse it
-
-        p, u = self.getparser()
-
-        while 1:
-            if sock:
-                response = sock.recv(1024)
-            else:
-                response = file.read(1024)
-            if not response:
-                break
-            if self.verbose:
-                sys.stdout.write("body: %s\n" % repr(response))
-            p.feed(response)
-
-        file.close()
-        p.close()
-
-        return u.close()
-
-##
-# Standard transport class for XML-RPC over HTTPS.
-
-class SafeTransport(Transport):
-    """Handles an HTTPS transaction to an XML-RPC server."""
-
-    # FIXME: mostly untested
-
-    def make_connection(self, host):
-        # create a HTTPS connection object from a host descriptor
-        # host may be a string, or a (host, x509-dict) tuple
-        import httplib
-        host, extra_headers, x509 = self.get_host_info(host)
-        try:
-            HTTPS = httplib.HTTPS
-        except AttributeError:
-            raise NotImplementedError(
-                "your version of httplib doesn't support HTTPS"
-                )
-        else:
-            return HTTPS(host, None, **(x509 or {}))
-
-##
-# Standard server proxy.  This class establishes a virtual connection
-# to an XML-RPC server.
-# <p>
-# This class is available as ServerProxy and Server.  New code should
-# use ServerProxy, to avoid confusion.
-#
-# @def ServerProxy(uri, **options)
-# @param uri The connection point on the server.
-# @keyparam transport A transport factory, compatible with the
-#    standard transport class.
-# @keyparam encoding The default encoding used for 8-bit strings
-#    (default is UTF-8).
-# @keyparam verbose Use a true value to enable debugging output.
-#    (printed to standard output).
-# @see Transport
-
-class ServerProxy:
-    """uri [,options] -> a logical connection to an XML-RPC server
-
-    uri is the connection point on the server, given as
-    scheme://host/target.
-
-    The standard implementation always supports the "http" scheme.  If
-    SSL socket support is available (Python 2.0), it also supports
-    "https".
-
-    If the target part and the slash preceding it are both omitted,
-    "/RPC2" is assumed.
-
-    The following options can be given as keyword arguments:
-
-        transport: a transport factory
-        encoding: the request encoding (default is UTF-8)
-
-    All 8-bit strings passed to the server proxy are assumed to use
-    the given encoding.
-    """
-
-    def __init__(self, uri, transport=None, encoding=None, verbose=0,
-                 allow_none=0, use_datetime=0):
-        # establish a "logical" server connection
-
-        # get the url
-        import urllib
-        type, uri = urllib.splittype(uri)
-        if type not in ("http", "https"):
-            raise IOError("unsupported XML-RPC protocol")
-        self.__host, self.__handler = urllib.splithost(uri)
-        if not self.__handler:
-            self.__handler = "/RPC2"
-
-        if transport is None:
-            if type == "https":
-                transport = SafeTransport(use_datetime=use_datetime)
-            else:
-                transport = Transport(use_datetime=use_datetime)
-        self.__transport = transport
-
-        self.__encoding = encoding
-        self.__verbose = verbose
-        self.__allow_none = allow_none
-
-    def __request(self, methodname, params):
-        # call a method on the remote server
-
-        request = dumps(params, methodname, encoding=self.__encoding,
-                        allow_none=self.__allow_none)
-
-        response = self.__transport.request(
-            self.__host,
-            self.__handler,
-            request,
-            verbose=self.__verbose
-            )
-
-        if len(response) == 1:
-            response = response[0]
-
-        return response
-
-    def __repr__(self):
-        return (
-            "<ServerProxy for %s%s>" %
-            (self.__host, self.__handler)
-            )
-
-    __str__ = __repr__
-
-    def __getattr__(self, name):
-        # magic method dispatcher
-        return _Method(self.__request, name)
-
-    # note: to call a remote object with an non-standard name, use
-    # result getattr(server, "strange-python-name")(args)
-
-# compatibility
-
-Server = ServerProxy
-
-# --------------------------------------------------------------------
-# test code
-
-if __name__ == "__main__":
-
-    # simple test program (from the XML-RPC specification)
-
-    # server = ServerProxy("http://localhost:8000") # local server
-    server = ServerProxy("http://time.xmlrpc.com/RPC2")
-
-    sys.stdout.write('%s\n' % server)
-
-    try:
-        sys.stdout.write('%s\n' % (server.currentTime.getCurrentTime(),))
-    except Error:
-        import traceback;traceback.print_exc()
-
-    multi = MultiCall(server)
-    multi.currentTime.getCurrentTime()
-    multi.currentTime.getCurrentTime()
-    try:
-        for response in multi():
-            sys.stdout.write('%s\n' % (response,))
-    except Error:
-        import traceback;traceback.print_exc()
diff --git a/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_nose.py b/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_nose.py
index 29229b80..20ea5b29 100644
--- a/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_nose.py
+++ 
b/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_nose.py @@ -5,6 +5,8 @@ from _pydev_runfiles import pydev_runfiles_xml_rpc import time from _pydev_runfiles.pydev_runfiles_coverage import start_coverage_support from contextlib import contextmanager +from io import StringIO +import traceback #======================================================================================================================= @@ -91,7 +93,7 @@ class PydevPlugin(Plugin): address = f, '?' except: sys.stderr.write("PyDev: Internal pydev error getting test address. Please report at the pydev bug tracker\n") - import traceback;traceback.print_exc() + traceback.print_exc() sys.stderr.write("\n\n\n") address = '?', '?' return address @@ -121,15 +123,11 @@ class PydevPlugin(Plugin): if len(err) != 3: if len(err) == 2: return err[1] # multiprocess - try: - from StringIO import StringIO - except: - from io import StringIO s = StringIO() etype, value, tb = err if isinstance(value, str): return value - import traceback;traceback.print_exception(etype, value, tb, file=s) + traceback.print_exception(etype, value, tb, file=s) return s.getvalue() return err diff --git a/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_xml_rpc.py b/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_xml_rpc.py index ff8e4b36..b4d6b5c1 100644 --- a/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_xml_rpc.py +++ b/src/debugpy/_vendored/pydevd/_pydev_runfiles/pydev_runfiles_xml_rpc.py @@ -5,18 +5,18 @@ import warnings from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding from _pydev_bundle.pydev_imports import xmlrpclib, _queue -from _pydevd_bundle.pydevd_constants import Null, IS_PY3K +from _pydevd_bundle.pydevd_constants import Null Queue = _queue.Queue -#This may happen in IronPython (in Python it shouldn't happen as there are -#'fast' replacements that are used in xmlrpclib.py) +# This may happen in IronPython (in Python it shouldn't happen as there are +# 'fast' replacements that are used in xmlrpclib.py) warnings.filterwarnings( 'ignore', 'The xmllib module is obsolete.*', DeprecationWarning) - file_system_encoding = getfilesystemencoding() + #======================================================================================================================= # _ServerHolder #======================================================================================================================= @@ -34,7 +34,6 @@ def set_server(server): _ServerHolder.SERVER = server - #======================================================================================================================= # ParallelNotification #======================================================================================================================= @@ -48,7 +47,6 @@ class ParallelNotification(object): return self.method, self.args - #======================================================================================================================= # KillServer #======================================================================================================================= @@ -61,26 +59,21 @@ class KillServer(object): #======================================================================================================================= class ServerFacade(object): - def __init__(self, notifications_queue): self.notifications_queue = notifications_queue - def notifyTestsCollected(self, *args): self.notifications_queue.put_nowait(ParallelNotification('notifyTestsCollected', args)) def 
notifyConnected(self, *args): self.notifications_queue.put_nowait(ParallelNotification('notifyConnected', args)) - def notifyTestRunFinished(self, *args): self.notifications_queue.put_nowait(ParallelNotification('notifyTestRunFinished', args)) - def notifyStartTest(self, *args): self.notifications_queue.put_nowait(ParallelNotification('notifyStartTest', args)) - def notifyTest(self, *args): new_args = [] for arg in args: @@ -89,19 +82,14 @@ class ServerFacade(object): self.notifications_queue.put_nowait(ParallelNotification('notifyTest', args)) - - - #======================================================================================================================= # ServerComm #======================================================================================================================= class ServerComm(threading.Thread): - - def __init__(self, notifications_queue, port, daemon=False): threading.Thread.__init__(self) - self.setDaemon(daemon) # If False, wait for all the notifications to be passed before exiting! + self.setDaemon(daemon) # If False, wait for all the notifications to be passed before exiting! self.finished = False self.notifications_queue = notifications_queue @@ -126,7 +114,6 @@ class ServerComm(threading.Thread): self.server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), port), encoding=encoding) - def run(self): while True: kill_found = False @@ -140,15 +127,14 @@ class ServerComm(threading.Thread): try: while True: - command = self.notifications_queue.get(block=False) #No block to create a batch. + command = self.notifications_queue.get(block=False) # No block to create a batch. if isinstance(command, KillServer): kill_found = True else: assert isinstance(command, ParallelNotification) commands.append(command.to_tuple()) except: - pass #That's OK, we're getting it until it becomes empty so that we notify multiple at once. - + pass # That's OK, we're getting it until it becomes empty so that we notify multiple at once. if commands: try: @@ -161,7 +147,6 @@ class ServerComm(threading.Thread): return - #======================================================================================================================= # initialize_server #======================================================================================================================= @@ -173,7 +158,7 @@ def initialize_server(port, daemon=False): _ServerHolder.SERVER_COMM = ServerComm(notifications_queue, port, daemon) _ServerHolder.SERVER_COMM.start() else: - #Create a null server, so that we keep the interface even without any connection. + # Create a null server, so that we keep the interface even without any connection. _ServerHolder.SERVER = Null() _ServerHolder.SERVER_COMM = Null() @@ -183,7 +168,6 @@ def initialize_server(port, daemon=False): traceback.print_exc() - #======================================================================================================================= # notifyTest #======================================================================================================================= @@ -205,7 +189,7 @@ def notifyStartTest(file, test): ''' assert file is not None if test is None: - test = '' #Could happen if we have an import error importing module. + test = '' # Could happen if we have an import error importing module. 
try: _ServerHolder.SERVER.notifyStartTest(file, test) @@ -215,26 +199,15 @@ def notifyStartTest(file, test): def _encode_if_needed(obj): # In the java side we expect strings to be ISO-8859-1 (org.python.pydev.debug.pyunit.PyUnitServer.initializeDispatches().new Dispatch() {...}.getAsStr(Object)) - if not IS_PY3K: - if isinstance(obj, str): - try: - return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace')) - except: - return xmlrpclib.Binary(obj) + if isinstance(obj, str): # Unicode in py3 + return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace')) - elif isinstance(obj, unicode): - return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace')) + elif isinstance(obj, bytes): + try: + return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace')) + except: + return xmlrpclib.Binary(obj) # bytes already - else: - if isinstance(obj, str): # Unicode in py3 - return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace')) - - elif isinstance(obj, bytes): - try: - return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace')) - except: - return xmlrpclib.Binary(obj) #bytes already - return obj @@ -255,7 +228,7 @@ def notifyTest(cond, captured_output, error_contents, file, test, time): assert error_contents is not None assert file is not None if test is None: - test = '' #Could happen if we have an import error importing module. + test = '' # Could happen if we have an import error importing module. assert time is not None try: captured_output = _encode_if_needed(captured_output) @@ -265,6 +238,7 @@ def notifyTest(cond, captured_output, error_contents, file, test, time): except: traceback.print_exc() + #======================================================================================================================= # notifyTestRunFinished #======================================================================================================================= diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_api.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_api.py index b011f29e..cdd484c7 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_api.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_api.py @@ -15,7 +15,7 @@ from _pydevd_bundle.pydevd_comm import (InternalGetThreadStack, internal_get_com from _pydevd_bundle.pydevd_comm_constants import (CMD_THREAD_SUSPEND, file_system_encoding, CMD_STEP_INTO_MY_CODE, CMD_STOP_ON_START, CMD_SMART_STEP_INTO) from _pydevd_bundle.pydevd_constants import (get_current_thread_id, set_protocol, get_protocol, - HTTP_JSON_PROTOCOL, JSON_PROTOCOL, IS_PY3K, DebugInfoHolder, dict_keys, dict_items, IS_WINDOWS) + HTTP_JSON_PROTOCOL, JSON_PROTOCOL, DebugInfoHolder, IS_WINDOWS) from _pydevd_bundle.pydevd_net_command_factory_json import NetCommandFactoryJson from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory import pydevd_file_utils @@ -329,10 +329,7 @@ class PyDevdAPI(object): -- in py3 raises an error if it's not str already. ''' if s.__class__ != str: - if not IS_PY3K: - s = s.encode('utf-8') - else: - raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (s, s.__class__)) + raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (s, s.__class__)) return s def filename_to_str(self, filename): @@ -341,10 +338,7 @@ class PyDevdAPI(object): -- in py3 raises an error if it's not str already. 
''' if filename.__class__ != str: - if not IS_PY3K: - filename = filename.encode(file_system_encoding) - else: - raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (filename, filename.__class__)) + raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (filename, filename.__class__)) return filename def filename_to_server(self, filename): @@ -578,9 +572,9 @@ class PyDevdAPI(object): translations are applied). ''' pydev_log.debug('Reapplying breakpoints.') - items = dict_items(py_db.api_received_breakpoints) # Create a copy with items to reapply. + values = list(py_db.api_received_breakpoints.values()) # Create a copy with items to reapply. self.remove_all_breakpoints(py_db, '*') - for _key, val in items: + for val in values: _new_filename, api_add_breakpoint_params = val self.add_breakpoint(py_db, *api_add_breakpoint_params) @@ -614,7 +608,7 @@ class PyDevdAPI(object): changed = True else: - items = dict_items(py_db.api_received_breakpoints) # Create a copy to remove items. + items = list(py_db.api_received_breakpoints.items()) # Create a copy to remove items. translated_filenames = [] for key, val in items: original_filename, _breakpoint_id = key @@ -644,7 +638,7 @@ class PyDevdAPI(object): :param int breakpoint_id: ''' - for key, val in dict_items(py_db.api_received_breakpoints): + for key, val in list(py_db.api_received_breakpoints.items()): original_filename, existing_breakpoint_id = key _new_filename, _api_add_breakpoint_params = val if received_filename == original_filename and existing_breakpoint_id == breakpoint_id: @@ -687,7 +681,7 @@ class PyDevdAPI(object): except KeyError: pydev_log.info("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n", - canonical_normalized_filename, breakpoint_id, dict_keys(id_to_pybreakpoint)) + canonical_normalized_filename, breakpoint_id, list(id_to_pybreakpoint)) py_db.on_breakpoints_changed(removed=True) diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_breakpoints.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_breakpoints.py index 29cb7ff4..1f14aae1 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_breakpoints.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_breakpoints.py @@ -1,4 +1,3 @@ -from _pydevd_bundle.pydevd_constants import dict_iter_values, IS_PY24 from _pydev_bundle import pydev_log from _pydevd_bundle import pydevd_import_class from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame @@ -119,7 +118,7 @@ def get_exception_breakpoint(exctype, exceptions): try: return exceptions[exception_full_qname] except KeyError: - for exception_breakpoint in dict_iter_values(exceptions): + for exception_breakpoint in exceptions.values(): if exception_breakpoint.type is not None and issubclass(exctype, exception_breakpoint.type): if exc is None or issubclass(exception_breakpoint.type, exc.type): exc = exception_breakpoint @@ -179,9 +178,6 @@ def stop_on_unhandled_exception(py_db, thread, additional_info, arg): def get_exception_class(kls): - if IS_PY24 and "BaseException" == kls: - kls = "Exception" - try: return eval(kls) except: diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_bytecode_utils.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_bytecode_utils.py index 8f2f5dad..d84fc9d6 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_bytecode_utils.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_bytecode_utils.py @@ -4,10 +4,6 @@ Bytecode analysing utils. 
Originally added for using in smart step into. Note: not importable from Python 2. """ -import sys -if sys.version_info[0] < 3: - raise ImportError('This module is only compatible with Python 3.') - from _pydev_bundle import pydev_log from types import CodeType from _pydevd_frame_eval.vendored.bytecode.instr import _Variable diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_code_to_source.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_code_to_source.py index 848af188..a505d876 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_code_to_source.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_code_to_source.py @@ -8,15 +8,12 @@ Note: this is a work in progress / proof of concept / not ready to be used. import dis from _pydevd_bundle.pydevd_collect_bytecode_info import iter_instructions -from _pydevd_bundle.pydevd_constants import dict_iter_items, IS_PY2 from _pydev_bundle import pydev_log import sys import inspect +from io import StringIO -try: - xrange = xrange -except: - xrange = range +xrange = range class _Stack(object): @@ -354,110 +351,90 @@ class _CallFunction(_BaseHandler): self.stack.push(self) -if IS_PY2: +@_register +class _MakeFunctionPy3(_BaseHandler): + """ + Pushes a new function object on the stack. From bottom to top, the consumed stack must consist + of values if the argument carries a specified flag value - @_register - class _MakeFunctionPy2(_BaseHandler): - """ - Pushes a new function object on the stack. TOS is the code associated with the function. The - function object is defined to have argc default parameters, which are found below TOS. - """ + 0x01 a tuple of default values for positional-only and positional-or-keyword parameters in positional order - opname = "MAKE_FUNCTION" + 0x02 a dictionary of keyword-only parameters' default values - def _handle(self): - stack = self.stack - self.code = stack.pop() + 0x04 an annotation dictionary - stack.push(self) + 0x08 a tuple containing cells for free variables, making a closure - _MakeFunction = _MakeFunctionPy2 + the code associated with the function (at TOS1) -else: + the qualified name of the function (at TOS) + """ - @_register - class _MakeFunctionPy3(_BaseHandler): - """ - Pushes a new function object on the stack. 
From bottom to top, the consumed stack must consist - of values if the argument carries a specified flag value + opname = "MAKE_FUNCTION" + is_lambda = False - 0x01 a tuple of default values for positional-only and positional-or-keyword parameters in positional order + def _handle(self): + stack = self.stack + self.qualified_name = stack.pop() + self.code = stack.pop() - 0x02 a dictionary of keyword-only parameters' default values + default_node = None + if self.instruction.argval & 0x01: + default_node = stack.pop() - 0x04 an annotation dictionary + is_lambda = self.is_lambda = '<lambda>' in [x.tok for x in self.qualified_name.tokens] - 0x08 a tuple containing cells for free variables, making a closure - - the code associated with the function (at TOS1) - - the qualified name of the function (at TOS) - """ - - opname = "MAKE_FUNCTION" - is_lambda = False - - def _handle(self): - stack = self.stack - self.qualified_name = stack.pop() - self.code = stack.pop() - - default_node = None - if self.instruction.argval & 0x01: - default_node = stack.pop() - - is_lambda = self.is_lambda = '<lambda>' in [x.tok for x in self.qualified_name.tokens] + if not is_lambda: + def_token = _Token(self.i_line, None, 'def ') + self.tokens.append(def_token) + for token in self.qualified_name.tokens: + self.tokens.append(token) if not is_lambda: - def_token = _Token(self.i_line, None, 'def ') - self.tokens.append(def_token) + token.mark_after(def_token) + prev = token - for token in self.qualified_name.tokens: - self.tokens.append(token) - if not is_lambda: - token.mark_after(def_token) - prev = token + open_parens_token = _Token(self.i_line, None, '(', after=prev) + self.tokens.append(open_parens_token) + prev = open_parens_token - open_parens_token = _Token(self.i_line, None, '(', after=prev) - self.tokens.append(open_parens_token) - prev = open_parens_token + code = self.code.instruction.argval - code = self.code.instruction.argval + if default_node: + defaults = ([_SENTINEL] * (len(code.co_varnames) - len(default_node.instruction.argval))) + list(default_node.instruction.argval) + else: + defaults = [_SENTINEL] * len(code.co_varnames) - if default_node: - defaults = ([_SENTINEL] * (len(code.co_varnames) - len(default_node.instruction.argval))) + list(default_node.instruction.argval) - else: - defaults = [_SENTINEL] * len(code.co_varnames) + for i, arg in enumerate(code.co_varnames): + if i > 0: + comma_token = _Token(prev.i_line, None, ', ', after=prev) + self.tokens.append(comma_token) + prev = comma_token - for i, arg in enumerate(code.co_varnames): - if i > 0: - comma_token = _Token(prev.i_line, None, ', ', after=prev) - self.tokens.append(comma_token) - prev = comma_token + arg_token = _Token(self.i_line, None, arg, after=prev) + self.tokens.append(arg_token) - arg_token = _Token(self.i_line, None, arg, after=prev) - self.tokens.append(arg_token) + default = defaults[i] + if default is not _SENTINEL: + eq_token = _Token(default_node.i_line, None, '=', after=prev) + self.tokens.append(eq_token) + prev = eq_token - default = defaults[i] - if default is not _SENTINEL: - eq_token = _Token(default_node.i_line, None, '=', after=prev) - self.tokens.append(eq_token) - prev = eq_token + default_token = _Token(default_node.i_line, None, str(default), after=prev) + self.tokens.append(default_token) + prev = default_token - default_token = _Token(default_node.i_line, None, str(default), after=prev) - self.tokens.append(default_token) - prev = default_token + tok_close_parens = _Token(prev.i_line, None, '):', after=prev) + 
self.tokens.append(tok_close_parens) - tok_close_parens = _Token(prev.i_line, None, '):', after=prev) - self.tokens.append(tok_close_parens) + self._write_tokens() - self._write_tokens() + stack.push(self) + self.writer.indent(prev.i_line + 1) + self.writer.dedent(max(self.disassembler.merge_code(code))) - stack.push(self) - self.writer.indent(prev.i_line + 1) - self.writer.dedent(max(self.disassembler.merge_code(code))) - _MakeFunction = _MakeFunctionPy3 +_MakeFunction = _MakeFunctionPy3 def _print_after_info(line_contents, stream=None): @@ -518,10 +495,6 @@ def _compose_line_contents(line_contents, previous_line_tokens): if token not in handled: lst.append(token.tok) - try: - from StringIO import StringIO - except: - from io import StringIO stream = StringIO() _print_after_info(line_contents, stream) pydev_log.critical('Error. After markers are not correct:\n%s', stream.getvalue()) @@ -577,7 +550,7 @@ class _PyCodeToSource(object): # print(d, getattr(code, d)) line_to_contents = _PyCodeToSource(code, self.memo).build_line_to_contents() lines = [] - for line, contents in sorted(dict_iter_items(line_to_contents)): + for line, contents in sorted(line_to_contents.items()): lines.append(line) self.writer.get_line(line).extend(contents) if DEBUG: @@ -587,13 +560,11 @@ class _PyCodeToSource(object): def disassemble(self): show_lines = False line_to_contents = self.build_line_to_contents() - from io import StringIO - stream = StringIO() last_line = 0 indent = '' previous_line_tokens = set() - for i_line, contents in sorted(dict_iter_items(line_to_contents)): + for i_line, contents in sorted(line_to_contents.items()): while last_line < i_line - 1: if show_lines: stream.write(u"%s.\n" % (last_line + 1,)) diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_collect_bytecode_info.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_collect_bytecode_info.py index 39da7150..48f90efa 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_collect_bytecode_info.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_collect_bytecode_info.py @@ -4,15 +4,12 @@ import sys from collections import namedtuple from _pydev_bundle import pydev_log -from _pydevd_bundle.pydevd_constants import (IS_PY38_OR_GREATER, - dict_iter_items, dict_iter_values) +from _pydevd_bundle.pydevd_constants import IS_PY38_OR_GREATER from opcode import (EXTENDED_ARG, HAVE_ARGUMENT, cmp_op, hascompare, hasconst, hasfree, hasjrel, haslocal, hasname, opname) -try: - xrange -except NameError: - xrange = range +xrange = range +from io import StringIO class TryExceptInfo(object): @@ -892,9 +889,9 @@ class _Disassembler(object): instruction.argval, self.firstlineno, self.level + 1 ).build_line_to_contents() - for contents in dict_iter_values(code_line_to_contents): + for contents in code_line_to_contents.values(): contents.insert(0, ' ') - for line, contents in dict_iter_items(code_line_to_contents): + for line, contents in code_line_to_contents.items(): line_to_contents.setdefault(line, []).extend(contents) return msg(instruction, 'LOAD_CONST(code)') @@ -935,14 +932,10 @@ class _Disassembler(object): def disassemble(self): line_to_contents = self.build_line_to_contents() - try: - from StringIO import StringIO - except ImportError: - from io import StringIO stream = StringIO() last_line = 0 show_lines = False - for line, contents in sorted(dict_iter_items(line_to_contents)): + for line, contents in sorted(line_to_contents.items()): while last_line < line - 1: if show_lines: stream.write('%s.\n' % (last_line + 1,)) 
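
The three files above repeat the same two Python-3-only simplifications: the guarded StringIO import collapses to a plain `from io import StringIO`, and the `dict_iter_items`/`dict_iter_values` indirection collapses to direct dict methods. A minimal sketch of the resulting shape (illustrative only; `render` and the sample data are made up, not part of the patch):

    # Illustrative sketch (assumed names, not from the patch).
    from io import StringIO  # Py3-only: no StringIO/cStringIO fallback needed

    def render(line_to_contents):
        # Py3 dicts expose .items()/.values() directly; the removed
        # dict_iter_items()/dict_iter_values() helpers only papered over
        # Py2's iteritems()/itervalues() split.
        stream = StringIO()
        for line, contents in sorted(line_to_contents.items()):
            stream.write('%s: %s\n' % (line, ''.join(contents)))
        return stream.getvalue()

    assert render({2: ['b'], 1: ['a']}) == '1: a\n2: b\n'
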
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py index 6e41ae72..395544c6 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py @@ -71,7 +71,7 @@ from _pydev_imps._pydev_saved_modules import time from _pydev_imps._pydev_saved_modules import threading from _pydev_imps._pydev_saved_modules import socket as socket_module from _pydevd_bundle.pydevd_constants import (DebugInfoHolder, IS_WINDOWS, IS_JYTHON, - IS_PY2, IS_PY36_OR_GREATER, STATE_RUN, dict_keys, ASYNC_EVAL_TIMEOUT_SEC, + IS_PY36_OR_GREATER, STATE_RUN, ASYNC_EVAL_TIMEOUT_SEC, get_global_debugger, GetGlobalDebugger, set_global_debugger, silence_warnings_decorator) # Keep for backward compatibility @UnusedImport from _pydev_bundle.pydev_override import overrides import weakref @@ -89,20 +89,10 @@ import dis from _pydevd_bundle.pydevd_frame_utils import create_frames_list_from_exception_cause import pydevd_file_utils import itertools -from functools import partial -try: - from urllib import quote_plus, unquote_plus # @UnresolvedImport -except: - from urllib.parse import quote_plus, unquote_plus # @Reimport @UnresolvedImport - +from urllib.parse import quote_plus, unquote_plus import pydevconsole from _pydevd_bundle import pydevd_vars, pydevd_io, pydevd_reload - -try: - from _pydevd_bundle import pydevd_bytecode_utils -except ImportError: - pydevd_bytecode_utils = None # i.e.: Not available on Py2. - +from _pydevd_bundle import pydevd_bytecode_utils from _pydevd_bundle import pydevd_xml from _pydevd_bundle import pydevd_vm_type import sys @@ -116,13 +106,7 @@ from _pydev_bundle import _pydev_completer from pydevd_tracing import get_exception_traceback_str from _pydevd_bundle import pydevd_console from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules -try: - import cStringIO as StringIO # may not always be available @UnusedImport -except: - try: - import StringIO # @Reimport @UnresolvedImport - except: - import io as StringIO +from io import StringIO # CMD_XXX constants imported for backward compatibility from _pydevd_bundle.pydevd_comm_constants import * # @UnusedWildImport @@ -583,9 +567,6 @@ def _send_io_message(py_db, s): def internal_reload_code(dbg, seq, module_name, filename): try: found_module_to_reload = False - if IS_PY2 and isinstance(filename, unicode): - filename = filename.encode(sys.getfilesystemencoding()) - if module_name is not None: module_name = module_name if module_name not in sys.modules: @@ -727,11 +708,6 @@ class InternalSetNextStatementThread(InternalThreadCommand): self.line = line self.seq = seq - if IS_PY2: - if isinstance(func_name, unicode): - # On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes). 
- func_name = func_name.encode('utf-8') - self.func_name = func_name def do_it(self, dbg): @@ -808,18 +784,18 @@ class InternalGetVariable(InternalThreadCommand): def do_it(self, dbg): ''' Converts request into python variable ''' try: - xml = StringIO.StringIO() + xml = StringIO() xml.write("<xml>") - _typeName, val_dict = pydevd_vars.resolve_compound_variable_fields( + type_name, val_dict = pydevd_vars.resolve_compound_variable_fields( dbg, self.thread_id, self.frame_id, self.scope, self.attributes) if val_dict is None: val_dict = {} # assume properly ordered if resolver returns 'OrderedDict' # check type as string to support OrderedDict backport for older Python - keys = dict_keys(val_dict) - if not (_typeName == "OrderedDict" or val_dict.__class__.__name__ == "OrderedDict" or IS_PY36_OR_GREATER): - keys.sort(key=compare_object_attrs_key) + keys = list(val_dict) + if not (type_name == "OrderedDict" or val_dict.__class__.__name__ == "OrderedDict" or IS_PY36_OR_GREATER): + keys = sorted(keys, key=compare_object_attrs_key) timer = Timer() for k in keys: @@ -1167,12 +1143,6 @@ def internal_evaluate_expression_json(py_db, request, thread_id): ctx = NULL with ctx: - if IS_PY2 and isinstance(expression, unicode): - try: - expression.encode('utf-8') - except Exception: - _evaluate_response(py_db, request, '', error_message='Expression is not valid utf-8.') - raise try_exec = False if frame_id is None: @@ -1338,19 +1308,6 @@ def internal_set_expression_json(py_db, request, thread_id): if hasattr(fmt, 'to_dict'): fmt = fmt.to_dict() - if IS_PY2 and isinstance(expression, unicode): - try: - expression = expression.encode('utf-8') - except: - _evaluate_response(py_db, request, '', error_message='Expression is not valid utf-8.') - raise - if IS_PY2 and isinstance(value, unicode): - try: - value = value.encode('utf-8') - except: - _evaluate_response(py_db, request, '', error_message='Value is not valid utf-8.') - raise - frame = py_db.find_frame(thread_id, frame_id) exec_code = '%s = (%s)' % (expression, value) result = pydevd_vars.evaluate_expression(py_db, frame, exec_code, is_exec=True) @@ -1402,12 +1359,6 @@ def internal_get_completions(dbg, seq, thread_id, frame_id, act_tok, line=-1, co frame = dbg.find_frame(thread_id, frame_id) if frame is not None: - if IS_PY2: - if not isinstance(act_tok, bytes): - act_tok = act_tok.encode('utf-8') - if not isinstance(qualifier, bytes): - qualifier = qualifier.encode('utf-8') - completions = _pydev_completer.generate_completions(frame, act_tok) # Note that qualifier and start are only actually valid for the @@ -1833,7 +1784,7 @@ class AbstractGetValueAsyncThread(PyDBDaemonThread): @overrides(PyDBDaemonThread._on_run) def _on_run(self): start = time.time() - xml = StringIO.StringIO() + xml = StringIO() xml.write("<xml>") for (var_obj, name) in self.var_objs: current_time = time.time() diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_constants.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_constants.py index 6a8f3a3b..61705b7c 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_constants.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_constants.py @@ -104,15 +104,12 @@ IS_MAC = sys.platform == 'darwin' IS_64BIT_PROCESS = sys.maxsize > (2 ** 32) IS_JYTHON = pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON -IS_JYTH_LESS25 = False IS_PYPY = platform.python_implementation() == 'PyPy' if IS_JYTHON: import java.lang.System # @UnresolvedImport IS_WINDOWS = 
java.lang.System.getProperty("os.name").lower().startswith("windows") - if sys.version_info[0] == 2 and sys.version_info[1] < 5: - IS_JYTH_LESS25 = True USE_CUSTOM_SYS_CURRENT_FRAMES = not hasattr(sys, '_current_frames') or IS_PYPY USE_CUSTOM_SYS_CURRENT_FRAMES_MAP = USE_CUSTOM_SYS_CURRENT_FRAMES and (IS_PYPY or IS_IRONPYTHON) @@ -166,45 +163,16 @@ CYTHON_SUPPORTED = False python_implementation = platform.python_implementation() if python_implementation == 'CPython': # Only available for CPython! - if ( - (sys.version_info[0] == 2 and sys.version_info[1] >= 6) - or (sys.version_info[0] == 3 and sys.version_info[1] >= 3) - or (sys.version_info[0] > 3) - ): - # Supported in 2.6,2.7 or 3.3 onwards (32 or 64) - CYTHON_SUPPORTED = True + CYTHON_SUPPORTED = True #======================================================================================================================= # Python 3? #======================================================================================================================= -IS_PY3K = False -IS_PY34_OR_GREATER = False -IS_PY35_OR_GREATER = False -IS_PY36_OR_GREATER = False -IS_PY37_OR_GREATER = False -IS_PY38_OR_GREATER = False -IS_PY39_OR_GREATER = False -IS_PY310_OR_GREATER = False -IS_PY2 = True -IS_PY27 = False -IS_PY24 = False -try: - if sys.version_info[0] >= 3: - IS_PY3K = True - IS_PY2 = False - IS_PY34_OR_GREATER = sys.version_info >= (3, 4) - IS_PY35_OR_GREATER = sys.version_info >= (3, 5) - IS_PY36_OR_GREATER = sys.version_info >= (3, 6) - IS_PY37_OR_GREATER = sys.version_info >= (3, 7) - IS_PY38_OR_GREATER = sys.version_info >= (3, 8) - IS_PY39_OR_GREATER = sys.version_info >= (3, 9) - IS_PY310_OR_GREATER = sys.version_info >= (3, 10) - elif sys.version_info[0] == 2 and sys.version_info[1] == 7: - IS_PY27 = True - elif sys.version_info[0] == 2 and sys.version_info[1] == 4: - IS_PY24 = True -except AttributeError: - pass # Not all versions have sys.version_info +IS_PY36_OR_GREATER = sys.version_info >= (3, 6) +IS_PY37_OR_GREATER = sys.version_info >= (3, 7) +IS_PY38_OR_GREATER = sys.version_info >= (3, 8) +IS_PY39_OR_GREATER = sys.version_info >= (3, 9) +IS_PY310_OR_GREATER = sys.version_info >= (3, 10) def version_str(v): @@ -309,7 +277,7 @@ LOAD_VALUES_ASYNC = is_true_in_env('PYDEVD_LOAD_VALUES_ASYNC') DEFAULT_VALUE = "__pydevd_value_async" ASYNC_EVAL_TIMEOUT_SEC = 60 NEXT_VALUE_SEPARATOR = "__pydev_val__" -BUILTINS_MODULE_NAME = '__builtin__' if IS_PY2 else 'builtins' +BUILTINS_MODULE_NAME = 'builtins' SHOW_DEBUG_INFO_ENV = is_true_in_env(('PYCHARM_DEBUG', 'PYDEV_DEBUG', 'PYDEVD_DEBUG')) # Pandas customization. 
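
The hunk above replaces the defensive Py2-era version-flag block with direct tuple comparisons. `sys.version_info` compares lexicographically against a tuple, which is why each flag reduces to a single expression. A small illustrative sketch (the commented values are examples, not from the patch):

    import sys

    # Each flag is one lexicographic tuple comparison; e.g. on CPython 3.8,
    # (3, 8, 10, 'final', 0) >= (3, 6) is True, while >= (3, 10) is False
    # because 8 < 10 decides the comparison at the second element.
    IS_PY36_OR_GREATER = sys.version_info >= (3, 6)
    IS_PY310_OR_GREATER = sys.version_info >= (3, 10)

    assert IS_PY36_OR_GREATER or not IS_PY310_OR_GREATER  # 3.10+ implies 3.6+
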
@@ -472,68 +440,10 @@ def after_fork(): _thread_id_lock = ForkSafeLock() thread_get_ident = thread.get_ident -if IS_PY3K: - def dict_keys(d): - return list(d.keys()) - - def dict_values(d): - return list(d.values()) - - dict_iter_values = dict.values - - def dict_iter_items(d): - return d.items() - - def dict_items(d): - return list(d.items()) - - def as_str(s): - assert isinstance(s, str) - return s - -else: - dict_keys = None - try: - dict_keys = dict.keys - except: - pass - - if IS_JYTHON or not dict_keys: - - def dict_keys(d): - return d.keys() - - try: - dict_iter_values = dict.itervalues - except: - try: - dict_iter_values = dict.values # Older versions don't have the itervalues - except: - - def dict_iter_values(d): - return d.values() - - try: - dict_values = dict.values - except: - - def dict_values(d): - return d.values() - - def dict_iter_items(d): - try: - return d.iteritems() - except: - return d.items() - - def dict_items(d): - return d.items() - - def as_str(s): - if isinstance(s, unicode): - return s.encode('utf-8') - return s +def as_str(s): + assert isinstance(s, str) + return s def silence_warnings_decorator(func): @@ -548,36 +458,22 @@ def silence_warnings_decorator(func): def sorted_dict_repr(d): - s = sorted(dict_iter_items(d), key=lambda x:str(x[0])) + s = sorted(d.items(), key=lambda x:str(x[0])) return '{' + ', '.join(('%r: %r' % x) for x in s) + '}' def iter_chars(b): # In Python 2, we can iterate bytes or unicode with individual characters, but Python 3 onwards # changed that behavior so that when iterating bytes we actually get ints! - if not IS_PY2: - if isinstance(b, bytes): - # i.e.: do something as struct.unpack('3c', b) - return iter(struct.unpack(str(len(b)) + 'c', b)) + if isinstance(b, bytes): + # i.e.: do something as struct.unpack('3c', b) + return iter(struct.unpack(str(len(b)) + 'c', b)) return iter(b) -try: - xrange = xrange -except: - # Python 3k does not have it - xrange = range - -try: - import itertools - izip = itertools.izip -except: - izip = zip - -try: - from StringIO import StringIO -except: - from io import StringIO +# Python 3k does not have it +xrange = range +izip = zip if IS_JYTHON: diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.c b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.c index 9b2a638f..40640a5a 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.c +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.c @@ -1549,6 +1549,29 @@ static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos, PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict); +/* py_dict_values.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); + +/* CallUnboundCMethod0.proto */ +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_CallUnboundCMethod0(cfunc, self)\ + (likely((cfunc)->func) ?\ + (likely((cfunc)->flag == METH_NOARGS) ? 
(*((cfunc)->func))(self, NULL) :\ + (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ?\ + (PY_VERSION_HEX >= 0x030700A0 ?\ + (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0) :\ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL)) :\ + (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ?\ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL) :\ + (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, __pyx_empty_tuple, NULL)) :\ + ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, __pyx_empty_tuple) :\ + __Pyx__CallUnboundCMethod0(cfunc, self)))))) :\ + __Pyx__CallUnboundCMethod0(cfunc, self)) +#else +#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) +#endif + /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); @@ -1816,8 +1839,8 @@ static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_return[] = "return"; static const char __pyx_k_thread[] = "thread"; static const char __pyx_k_update[] = "update"; +static const char __pyx_k_values[] = "values"; static const char __pyx_k_writer[] = "writer"; -static const char __pyx_k_IS_PY3K[] = "IS_PY3K"; static const char __pyx_k_co_name[] = "co_name"; static const char __pyx_k_compile[] = "compile"; static const char __pyx_k_f_lasti[] = "f_lasti"; @@ -1927,7 +1950,6 @@ static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_trace_exception[] = "trace_exception"; static const char __pyx_k_DEBUG_START_PY3K[] = "DEBUG_START_PY3K"; -static const char __pyx_k_dict_iter_values[] = "dict_iter_values"; static const char __pyx_k_in_project_scope[] = "in_project_scope"; static const char __pyx_k_threading_active[] = "threading_active"; static const char __pyx_k_try_except_infos[] = "try_except_infos"; @@ -2053,7 +2075,6 @@ static PyObject *__pyx_n_s_EXCEPTION_TYPE_USER_UNHANDLED; static PyObject *__pyx_n_s_ForkSafeLock; static PyObject *__pyx_n_s_GeneratorExit; static PyObject *__pyx_n_s_IGNORE_EXCEPTION_TAG; -static PyObject *__pyx_n_s_IS_PY3K; static PyObject *__pyx_kp_s_IgnoreException; static PyObject *__pyx_kp_s_Ignore_exception_s_in_library_s; static PyObject *__pyx_n_s_ImportError; @@ -2135,7 +2156,6 @@ static PyObject *__pyx_n_s_constructed_tid_to_last_frame; static PyObject *__pyx_n_s_current_frames; static PyObject *__pyx_n_s_debug; static PyObject *__pyx_n_s_dict; -static PyObject *__pyx_n_s_dict_iter_values; static PyObject *__pyx_n_s_dis; static PyObject *__pyx_n_s_disable_tracing; static PyObject *__pyx_n_s_do_wait_suspend; @@ -2332,6 +2352,7 @@ static PyObject *__pyx_n_s_try_exc_info; static PyObject *__pyx_n_s_try_except_infos; static PyObject *__pyx_n_s_update; static PyObject *__pyx_kp_s_utf_8; +static PyObject *__pyx_n_s_values; static PyObject *__pyx_n_s_version; static PyObject *__pyx_n_s_writer; static int __pyx_pf_14_pydevd_bundle_13pydevd_cython_24PyDBAdditionalThreadInfo___init__(struct __pyx_obj_14_pydevd_bundle_13pydevd_cython_PyDBAdditionalThreadInfo *__pyx_v_self); /* proto */ @@ -2481,6 +2502,7 @@ static PyObject *__pyx_tp_new_14_pydevd_bundle_13pydevd_cython_TopLevelThreadTra static PyObject 
*__pyx_tp_new_14_pydevd_bundle_13pydevd_cython_ThreadTracer(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_get = {0, &__pyx_n_s_get, 0, 0, 0}; static __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_update = {0, &__pyx_n_s_update, 0, 0, 0}; +static __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_values = {0, &__pyx_n_s_values, 0, 0, 0}; static __Pyx_CachedCFunction __pyx_umethod_PyString_Type_rfind = {0, &__pyx_n_s_rfind, 0, 0, 0}; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; @@ -17316,7 +17338,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * if curr_func_name in ('?', '', ''): * curr_func_name = '' # <<<<<<<<<<<<<< * - * for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() + * for bp in breakpoints_for_file.values(): */ __Pyx_INCREF(__pyx_kp_s_); __Pyx_DECREF_SET(__pyx_v_curr_func_name, __pyx_kp_s_); @@ -17333,27 +17355,16 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa /* "_pydevd_bundle/pydevd_cython.pyx":958 * curr_func_name = '' * - * for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() # <<<<<<<<<<<<<< + * for bp in breakpoints_for_file.values(): # <<<<<<<<<<<<<< * # will match either global or some function * if bp.func_name in ('None', curr_func_name): */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_dict_iter_values); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 958, __pyx_L74_except_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - } + if (unlikely(__pyx_v_breakpoints_for_file == Py_None)) { + PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "values"); + __PYX_ERR(0, 958, __pyx_L74_except_error) } - __pyx_t_6 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_8, __pyx_v_breakpoints_for_file) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_breakpoints_for_file); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 958, __pyx_L74_except_error) + __pyx_t_6 = __Pyx_PyDict_Values(__pyx_v_breakpoints_for_file); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 958, __pyx_L74_except_error) __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(PyList_CheckExact(__pyx_t_6)) || PyTuple_CheckExact(__pyx_t_6)) { __pyx_t_3 = __pyx_t_6; __Pyx_INCREF(__pyx_t_3); __pyx_t_19 = 0; __pyx_t_12 = NULL; @@ -17398,7 +17409,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __pyx_t_6 = 0; /* "_pydevd_bundle/pydevd_cython.pyx":960 - * for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() + * for bp in breakpoints_for_file.values(): * # will match either global or some function * if bp.func_name in ('None', curr_func_name): # <<<<<<<<<<<<<< * has_breakpoint_in_frame = True @@ -17438,7 +17449,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa goto __pyx_L90_break; /* "_pydevd_bundle/pydevd_cython.pyx":960 - * for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() + * for bp in breakpoints_for_file.values(): * # will match either global or some function * if bp.func_name in ('None', curr_func_name): # <<<<<<<<<<<<<< * has_breakpoint_in_frame = True @@ -17449,7 +17460,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa /* "_pydevd_bundle/pydevd_cython.pyx":958 * curr_func_name = '' * - * for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() # <<<<<<<<<<<<<< + * for bp in breakpoints_for_file.values(): # <<<<<<<<<<<<<< * # will match either global or some function * if bp.func_name in ('None', curr_func_name): */ @@ -21573,7 +21584,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * else: * stop = False # <<<<<<<<<<<<<< * - * if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): + * if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): */ /*else*/ { __pyx_v_stop = 0; @@ -21583,7 +21594,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa /* "_pydevd_bundle/pydevd_cython.pyx":1242 * stop = False * - * if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): # <<<<<<<<<<<<<< + * if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): # <<<<<<<<<<<<<< * f_code = getattr(frame.f_back, 'f_code', None) * if f_code is not None: */ @@ -21605,15 +21616,6 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __pyx_t_14 = __pyx_t_9; goto __pyx_L236_bool_binop_done; } - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_IS_PY3K); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1242, __pyx_L166_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 1242, __pyx_L166_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_9) { - } else { - __pyx_t_14 = __pyx_t_9; - goto __pyx_L236_bool_binop_done; - } __pyx_t_9 = __Pyx_HasAttr(__pyx_v_frame, __pyx_n_s_f_back); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 1242, __pyx_L166_error) __pyx_t_10 = (__pyx_t_9 != 0); __pyx_t_14 = __pyx_t_10; @@ -21622,7 +21624,7 @@ static PyObject 
*__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa /* "_pydevd_bundle/pydevd_cython.pyx":1243 * - * if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): + * if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): * f_code = getattr(frame.f_back, 'f_code', None) # <<<<<<<<<<<<<< * if f_code is not None: * if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE: @@ -21636,7 +21638,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __pyx_t_6 = 0; /* "_pydevd_bundle/pydevd_cython.pyx":1244 - * if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): + * if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): * f_code = getattr(frame.f_back, 'f_code', None) * if f_code is not None: # <<<<<<<<<<<<<< * if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE: @@ -21701,7 +21703,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa } /* "_pydevd_bundle/pydevd_cython.pyx":1244 - * if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): + * if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): * f_code = getattr(frame.f_back, 'f_code', None) * if f_code is not None: # <<<<<<<<<<<<<< * if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE: @@ -21712,7 +21714,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa /* "_pydevd_bundle/pydevd_cython.pyx":1242 * stop = False * - * if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): # <<<<<<<<<<<<<< + * if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): # <<<<<<<<<<<<<< * f_code = getattr(frame.f_back, 'f_code', None) * if f_code is not None: */ @@ -21811,7 +21813,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * stopped_on_plugin = plugin_manager.stop(main_debugger, frame, event, self._args, stop_info, arg, step_cmd) * elif stop: */ - goto __pyx_L243; + goto __pyx_L242; } /* "_pydevd_bundle/pydevd_cython.pyx":1250 @@ -21935,7 +21937,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd) * self.do_wait_suspend(thread, frame, event, arg) */ - goto __pyx_L244; + goto __pyx_L243; } /* "_pydevd_bundle/pydevd_cython.pyx":1254 @@ -22031,22 +22033,22 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_13 = Py_TYPE(__pyx_t_8)->tp_iternext; - index = 0; __pyx_t_3 = __pyx_t_13(__pyx_t_8); if (unlikely(!__pyx_t_3)) goto __pyx_L246_unpacking_failed; + index = 0; __pyx_t_3 = __pyx_t_13(__pyx_t_8); if (unlikely(!__pyx_t_3)) goto __pyx_L245_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); - index = 1; __pyx_t_4 = __pyx_t_13(__pyx_t_8); if (unlikely(!__pyx_t_4)) goto __pyx_L246_unpacking_failed; + index = 1; __pyx_t_4 = __pyx_t_13(__pyx_t_8); if (unlikely(!__pyx_t_4)) goto __pyx_L245_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); - index = 2; __pyx_t_7 = __pyx_t_13(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L246_unpacking_failed; + index = 2; __pyx_t_7 = __pyx_t_13(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L245_unpacking_failed; __Pyx_GOTREF(__pyx_t_7); if (__Pyx_IternextUnpackEndCheck(__pyx_t_13(__pyx_t_8), 3) < 0) __PYX_ERR(0, 1260, __pyx_L166_error) __pyx_t_13 = NULL; 
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L247_unpacking_done; - __pyx_L246_unpacking_failed:; + goto __pyx_L246_unpacking_done; + __pyx_L245_unpacking_failed:; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_13 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 1260, __pyx_L166_error) - __pyx_L247_unpacking_done:; + __pyx_L246_unpacking_done:; } __pyx_v_back_absolute_filename = __pyx_t_3; __pyx_t_3 = 0; @@ -22084,7 +22086,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa if (!__pyx_t_10) { } else { __pyx_t_14 = __pyx_t_10; - goto __pyx_L249_bool_binop_done; + goto __pyx_L248_bool_binop_done; } __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_DEBUG_START_PY3K); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1261, __pyx_L166_error) __Pyx_GOTREF(__pyx_t_4); @@ -22093,7 +22095,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_10 < 0)) __PYX_ERR(0, 1261, __pyx_L166_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_14 = __pyx_t_10; - __pyx_L249_bool_binop_done:; + __pyx_L248_bool_binop_done:; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_10 = (__pyx_t_14 != 0); if (__pyx_t_10) { @@ -22115,7 +22117,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * back = None * */ - goto __pyx_L248; + goto __pyx_L247; } /* "_pydevd_bundle/pydevd_cython.pyx":1264 @@ -22306,7 +22308,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * # In this case, we'll have to skip the previous one because it shouldn't be traced. */ } - __pyx_L248:; + __pyx_L247:; /* "_pydevd_bundle/pydevd_cython.pyx":1256 * elif is_return: # return event @@ -22429,7 +22431,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * # if we're in a return, we want it to appear to the user in the previous frame! 
* self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd) */ - goto __pyx_L252; + goto __pyx_L251; } /* "_pydevd_bundle/pydevd_cython.pyx":1285 @@ -22473,7 +22475,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa */ __pyx_v_info->pydev_state = 1; } - __pyx_L252:; + __pyx_L251:; /* "_pydevd_bundle/pydevd_cython.pyx":1254 * self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd) @@ -22483,7 +22485,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * if back is not None: */ } - __pyx_L244:; + __pyx_L243:; /* "_pydevd_bundle/pydevd_cython.pyx":1250 * if plugin_stop: @@ -22493,7 +22495,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd) */ } - __pyx_L243:; + __pyx_L242:; /* "_pydevd_bundle/pydevd_cython.pyx":1093 * @@ -22584,9 +22586,9 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa * info.pydev_original_step_cmd = -1 * info.pydev_step_cmd = -1 */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_pydev_log); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1294, __pyx_L257_error) + __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_pydev_log); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1294, __pyx_L256_error) __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_exception); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1294, __pyx_L257_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_exception); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1294, __pyx_L256_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = NULL; @@ -22601,7 +22603,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa } __pyx_t_3 = (__pyx_t_8) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_8) : __Pyx_PyObject_CallNoArg(__pyx_t_1); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1294, __pyx_L257_error) + if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1294, __pyx_L256_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -22648,8 +22650,8 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __Pyx_XDECREF(__pyx_t_28); __pyx_t_28 = 0; __Pyx_XDECREF(__pyx_t_27); __pyx_t_27 = 0; __Pyx_XDECREF(__pyx_t_26); __pyx_t_26 = 0; - goto __pyx_L264_try_end; - __pyx_L257_error:; + goto __pyx_L263_try_end; + __pyx_L256_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_21); __pyx_t_21 = 0; @@ -22665,7 +22667,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa */ /*except:*/ { __Pyx_AddTraceback("_pydevd_bundle.pydevd_cython.PyDBFrame.trace_dispatch", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_8) < 0) __PYX_ERR(0, 1298, __pyx_L259_except_error) + if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_8) < 0) __PYX_ERR(0, 1298, __pyx_L258_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_8); @@ -22682,7 +22684,7 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __Pyx_INCREF(Py_None); __pyx_t_2 = Py_None; } else { - __Pyx_GetModuleGlobalName(__pyx_t_30, __pyx_n_s_NO_FTRACE); if (unlikely(!__pyx_t_30)) __PYX_ERR(0, 1299, __pyx_L259_except_error) + __Pyx_GetModuleGlobalName(__pyx_t_30, __pyx_n_s_NO_FTRACE); if (unlikely(!__pyx_t_30)) __PYX_ERR(0, 1299, __pyx_L258_except_error) __Pyx_GOTREF(__pyx_t_30); __pyx_t_2 = __pyx_t_30; __pyx_t_30 = 0; @@ -22695,9 +22697,9 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L260_except_return; + goto __pyx_L259_except_return; } - __pyx_L259_except_error:; + __pyx_L258_except_error:; /* "_pydevd_bundle/pydevd_cython.pyx":1293 * raise @@ -22711,13 +22713,13 @@ static PyObject *__pyx_f_14_pydevd_bundle_13pydevd_cython_9PyDBFrame_trace_dispa __Pyx_XGIVEREF(__pyx_t_26); __Pyx_ExceptionReset(__pyx_t_28, __pyx_t_27, __pyx_t_26); goto __pyx_L168_except_error; - __pyx_L260_except_return:; + __pyx_L259_except_return:; __Pyx_XGIVEREF(__pyx_t_28); __Pyx_XGIVEREF(__pyx_t_27); __Pyx_XGIVEREF(__pyx_t_26); __Pyx_ExceptionReset(__pyx_t_28, __pyx_t_27, __pyx_t_26); goto __pyx_L169_except_return; - __pyx_L264_try_end:; + __pyx_L263_try_end:; } __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -36225,7 +36227,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ForkSafeLock, __pyx_k_ForkSafeLock, sizeof(__pyx_k_ForkSafeLock), 0, 0, 1, 1}, {&__pyx_n_s_GeneratorExit, __pyx_k_GeneratorExit, sizeof(__pyx_k_GeneratorExit), 0, 0, 1, 1}, {&__pyx_n_s_IGNORE_EXCEPTION_TAG, __pyx_k_IGNORE_EXCEPTION_TAG, sizeof(__pyx_k_IGNORE_EXCEPTION_TAG), 0, 0, 1, 1}, - {&__pyx_n_s_IS_PY3K, __pyx_k_IS_PY3K, sizeof(__pyx_k_IS_PY3K), 0, 0, 1, 1}, {&__pyx_kp_s_IgnoreException, __pyx_k_IgnoreException, sizeof(__pyx_k_IgnoreException), 0, 0, 1, 0}, {&__pyx_kp_s_Ignore_exception_s_in_library_s, __pyx_k_Ignore_exception_s_in_library_s, sizeof(__pyx_k_Ignore_exception_s_in_library_s), 0, 0, 1, 0}, 
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, @@ -36307,7 +36308,6 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_current_frames, __pyx_k_current_frames, sizeof(__pyx_k_current_frames), 0, 0, 1, 1}, {&__pyx_n_s_debug, __pyx_k_debug, sizeof(__pyx_k_debug), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, - {&__pyx_n_s_dict_iter_values, __pyx_k_dict_iter_values, sizeof(__pyx_k_dict_iter_values), 0, 0, 1, 1}, {&__pyx_n_s_dis, __pyx_k_dis, sizeof(__pyx_k_dis), 0, 0, 1, 1}, {&__pyx_n_s_disable_tracing, __pyx_k_disable_tracing, sizeof(__pyx_k_disable_tracing), 0, 0, 1, 1}, {&__pyx_n_s_do_wait_suspend, __pyx_k_do_wait_suspend, sizeof(__pyx_k_do_wait_suspend), 0, 0, 1, 1}, @@ -36504,6 +36504,7 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_try_except_infos, __pyx_k_try_except_infos, sizeof(__pyx_k_try_except_infos), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_kp_s_utf_8, __pyx_k_utf_8, sizeof(__pyx_k_utf_8), 0, 0, 1, 0}, + {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, {&__pyx_n_s_version, __pyx_k_version, sizeof(__pyx_k_version), 0, 0, 1, 1}, {&__pyx_n_s_writer, __pyx_k_writer, sizeof(__pyx_k_writer), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} @@ -36707,6 +36708,7 @@ static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { __pyx_umethod_PyDict_Type_get.type = (PyObject*)&PyDict_Type; __pyx_umethod_PyDict_Type_update.type = (PyObject*)&PyDict_Type; + __pyx_umethod_PyDict_Type_values.type = (PyObject*)&PyDict_Type; __pyx_umethod_PyString_Type_rfind.type = (PyObject*)&PyString_Type; if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) @@ -37279,7 +37281,7 @@ if (!__Pyx_RefNanny) { * * from _pydev_bundle import pydev_log # <<<<<<<<<<<<<< * from _pydevd_bundle import pydevd_dont_trace - * from _pydevd_bundle.pydevd_constants import (dict_iter_values, IS_PY3K, RETURN_VALUES_DICT, NO_FTRACE, + * from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE, */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); @@ -37299,7 +37301,7 @@ if (!__Pyx_RefNanny) { * * from _pydev_bundle import pydev_log * from _pydevd_bundle import pydevd_dont_trace # <<<<<<<<<<<<<< - * from _pydevd_bundle.pydevd_constants import (dict_iter_values, IS_PY3K, RETURN_VALUES_DICT, NO_FTRACE, + * from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE, * EXCEPTION_TYPE_HANDLED, EXCEPTION_TYPE_USER_UNHANDLED) */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 151, __pyx_L1_error) @@ -37319,41 +37321,27 @@ if (!__Pyx_RefNanny) { /* "_pydevd_bundle/pydevd_cython.pyx":152 * from _pydev_bundle import pydev_log * from _pydevd_bundle import pydevd_dont_trace - * from _pydevd_bundle.pydevd_constants import (dict_iter_values, IS_PY3K, RETURN_VALUES_DICT, NO_FTRACE, # <<<<<<<<<<<<<< + * from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE, # <<<<<<<<<<<<<< * EXCEPTION_TYPE_HANDLED, EXCEPTION_TYPE_USER_UNHANDLED) * from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace */ - __pyx_t_2 = PyList_New(6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) + __pyx_t_2 = 
PyList_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_dict_iter_values); - __Pyx_GIVEREF(__pyx_n_s_dict_iter_values); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_dict_iter_values); - __Pyx_INCREF(__pyx_n_s_IS_PY3K); - __Pyx_GIVEREF(__pyx_n_s_IS_PY3K); - PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_IS_PY3K); __Pyx_INCREF(__pyx_n_s_RETURN_VALUES_DICT); __Pyx_GIVEREF(__pyx_n_s_RETURN_VALUES_DICT); - PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_RETURN_VALUES_DICT); + PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_RETURN_VALUES_DICT); __Pyx_INCREF(__pyx_n_s_NO_FTRACE); __Pyx_GIVEREF(__pyx_n_s_NO_FTRACE); - PyList_SET_ITEM(__pyx_t_2, 3, __pyx_n_s_NO_FTRACE); + PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_NO_FTRACE); __Pyx_INCREF(__pyx_n_s_EXCEPTION_TYPE_HANDLED); __Pyx_GIVEREF(__pyx_n_s_EXCEPTION_TYPE_HANDLED); - PyList_SET_ITEM(__pyx_t_2, 4, __pyx_n_s_EXCEPTION_TYPE_HANDLED); + PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_EXCEPTION_TYPE_HANDLED); __Pyx_INCREF(__pyx_n_s_EXCEPTION_TYPE_USER_UNHANDLED); __Pyx_GIVEREF(__pyx_n_s_EXCEPTION_TYPE_USER_UNHANDLED); - PyList_SET_ITEM(__pyx_t_2, 5, __pyx_n_s_EXCEPTION_TYPE_USER_UNHANDLED); + PyList_SET_ITEM(__pyx_t_2, 3, __pyx_n_s_EXCEPTION_TYPE_USER_UNHANDLED); __pyx_t_1 = __Pyx_Import(__pyx_n_s_pydevd_bundle_pydevd_constants, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_dict_iter_values); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_dict_iter_values, __pyx_t_2) < 0) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_IS_PY3K); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_IS_PY3K, __pyx_t_2) < 0) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_RETURN_VALUES_DICT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_RETURN_VALUES_DICT, __pyx_t_2) < 0) __PYX_ERR(0, 152, __pyx_L1_error) @@ -37373,7 +37361,7 @@ if (!__Pyx_RefNanny) { __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "_pydevd_bundle/pydevd_cython.pyx":154 - * from _pydevd_bundle.pydevd_constants import (dict_iter_values, IS_PY3K, RETURN_VALUES_DICT, NO_FTRACE, + * from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE, * EXCEPTION_TYPE_HANDLED, EXCEPTION_TYPE_USER_UNHANDLED) * from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace # <<<<<<<<<<<<<< * from _pydevd_bundle.pydevd_utils import get_clsname_for_code @@ -40144,6 +40132,33 @@ static CYTHON_INLINE int __Pyx_dict_iter_next( return 1; } +/* CallUnboundCMethod0 */ +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + PyObject *args, *result = NULL; + if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_ASSUME_SAFE_MACROS + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); +#else + args = PyTuple_Pack(1, self); + if (unlikely(!args)) goto bad; +#endif + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + Py_DECREF(args); 
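/* Editorial note on the generated helper above (descriptive only, not part of
   the upstream patch): the success path falls straight through the `bad:`
   label below, while the failure paths jump to it with `result` still NULL,
   so the function then returns NULL with the Python error indicator already
   set, per the usual CPython C-API error protocol. */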
+bad: + return result; +} + +/* py_dict_values */ +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return __Pyx_CallUnboundCMethod0(&__pyx_umethod_PyDict_Type_values, d); + else + return PyDict_Values(d); +} + /* DictGetItem */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.pyx b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.pyx index 1919fe5e..2a999dad 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.pyx +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython.pyx @@ -149,7 +149,7 @@ import re from _pydev_bundle import pydev_log from _pydevd_bundle import pydevd_dont_trace -from _pydevd_bundle.pydevd_constants import (dict_iter_values, IS_PY3K, RETURN_VALUES_DICT, NO_FTRACE, +from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE, EXCEPTION_TYPE_HANDLED, EXCEPTION_TYPE_USER_UNHANDLED) from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace from _pydevd_bundle.pydevd_utils import get_clsname_for_code @@ -955,7 +955,7 @@ cdef class PyDBFrame: if curr_func_name in ('?', '', ''): curr_func_name = '' - for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() + for bp in breakpoints_for_file.values(): # will match either global or some function if bp.func_name in ('None', curr_func_name): has_breakpoint_in_frame = True @@ -1239,7 +1239,7 @@ cdef class PyDBFrame: else: stop = False - if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): + if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): f_code = getattr(frame.f_back, 'f_code', None) if f_code is not None: if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE: diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace_files.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace_files.py index 2b1ed5b0..4222daea 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace_files.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace_files.py @@ -3,8 +3,6 @@ # DO NOT edit manually! # DO NOT edit manually! 
-from _pydevd_bundle.pydevd_constants import IS_PY3K - LIB_FILE = 1 PYDEV_FILE = 2 @@ -36,23 +34,17 @@ DONT_TRACE = { # things from pydev that we don't want to trace '_pydev_execfile.py':PYDEV_FILE, '__main__pydevd_gen_debug_adapter_protocol.py': PYDEV_FILE, - '_pydev_BaseHTTPServer.py': PYDEV_FILE, - '_pydev_SimpleXMLRPCServer.py': PYDEV_FILE, - '_pydev_SocketServer.py': PYDEV_FILE, '_pydev_calltip_util.py': PYDEV_FILE, '_pydev_completer.py': PYDEV_FILE, '_pydev_execfile.py': PYDEV_FILE, '_pydev_filesystem_encoding.py': PYDEV_FILE, '_pydev_getopt.py': PYDEV_FILE, '_pydev_imports_tipper.py': PYDEV_FILE, - '_pydev_inspect.py': PYDEV_FILE, '_pydev_jy_imports_tipper.py': PYDEV_FILE, '_pydev_log.py': PYDEV_FILE, - '_pydev_pkgutil_old.py': PYDEV_FILE, '_pydev_saved_modules.py': PYDEV_FILE, '_pydev_sys_patch.py': PYDEV_FILE, '_pydev_tipper_common.py': PYDEV_FILE, - '_pydev_xmlrpclib.py': PYDEV_FILE, 'django_debug.py': PYDEV_FILE, 'jinja2_debug.py': PYDEV_FILE, 'pycompletionserver.py': PYDEV_FILE, @@ -102,7 +94,6 @@ DONT_TRACE = { 'pydevd_defaults.py': PYDEV_FILE, 'pydevd_dont_trace.py': PYDEV_FILE, 'pydevd_dont_trace_files.py': PYDEV_FILE, - 'pydevd_exec.py': PYDEV_FILE, 'pydevd_exec2.py': PYDEV_FILE, 'pydevd_extension_api.py': PYDEV_FILE, 'pydevd_extension_utils.py': PYDEV_FILE, @@ -155,11 +146,10 @@ DONT_TRACE = { 'scandir_vendored.py': PYDEV_FILE, } -if IS_PY3K: - # if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716) - DONT_TRACE['io.py'] = LIB_FILE +# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716) +DONT_TRACE['io.py'] = LIB_FILE - # Don't trace common encodings too - DONT_TRACE['cp1252.py'] = LIB_FILE - DONT_TRACE['utf_8.py'] = LIB_FILE - DONT_TRACE['codecs.py'] = LIB_FILE +# Don't trace common encodings too +DONT_TRACE['cp1252.py'] = LIB_FILE +DONT_TRACE['utf_8.py'] = LIB_FILE +DONT_TRACE['codecs.py'] = LIB_FILE diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_exec.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_exec.py deleted file mode 100644 index 9a342ee1..00000000 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_exec.py +++ /dev/null @@ -1,5 +0,0 @@ -def Exec(exp, global_vars, local_vars=None): - if local_vars is not None: - exec exp in global_vars, local_vars - else: - exec exp in global_vars \ No newline at end of file diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_frame.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_frame.py index 8320a2ad..aee52b1e 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_frame.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_frame.py @@ -4,7 +4,7 @@ import re from _pydev_bundle import pydev_log from _pydevd_bundle import pydevd_dont_trace -from _pydevd_bundle.pydevd_constants import (dict_iter_values, IS_PY3K, RETURN_VALUES_DICT, NO_FTRACE, +from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE, EXCEPTION_TYPE_HANDLED, EXCEPTION_TYPE_USER_UNHANDLED) from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace from _pydevd_bundle.pydevd_utils import get_clsname_for_code @@ -822,7 +822,7 @@ class PyDBFrame: if curr_func_name in ('?', '', ''): curr_func_name = '' - for bp in dict_iter_values(breakpoints_for_file): # jython does not support itervalues() + for bp in breakpoints_for_file.values(): # will match either global or some function if bp.func_name in ('None', curr_func_name): 
has_breakpoint_in_frame = True @@ -1106,7 +1106,7 @@ class PyDBFrame: else: stop = False - if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(frame, "f_back"): + if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"): f_code = getattr(frame.f_back, 'f_code', None) if f_code is not None: if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE: diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_io.py index cb870f96..3682c4de 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_io.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_io.py @@ -1,4 +1,4 @@ -from _pydevd_bundle.pydevd_constants import ForkSafeLock, get_global_debugger, IS_PY2 +from _pydevd_bundle.pydevd_constants import ForkSafeLock, get_global_debugger import os import sys from contextlib import contextmanager @@ -100,18 +100,9 @@ class RedirectToPyDBIoMessages(object): return if s: - if IS_PY2: - # Need s in utf-8 bytes - if isinstance(s, unicode): # noqa - # Note: python 2.6 does not accept the "errors" keyword. - s = s.encode('utf-8', 'replace') - else: - s = s.decode(self.encoding, 'replace').encode('utf-8', 'replace') - - else: - # Need s in str - if isinstance(s, bytes): - s = s.decode(self.encoding, errors='replace') + # Need s in str + if isinstance(s, bytes): + s = s.decode(self.encoding, errors='replace') py_db = self.get_pydb() if py_db is not None: @@ -139,13 +130,8 @@ class IOBuf: return ''.join(b) # bytes on py2, str on py3. def write(self, s): - if IS_PY2: - if isinstance(s, unicode): - # can't use 'errors' as kwargs in py 2.6 - s = s.encode(self.encoding, 'replace') - else: - if isinstance(s, bytes): - s = s.decode(self.encoding, errors='replace') + if isinstance(s, bytes): + s = s.decode(self.encoding, errors='replace') self.buflist.append(s) def isatty(self): @@ -192,7 +178,7 @@ def start_redirect(keep_original_redirection=False, std='stdout', redirect_to=No stack = getattr(_RedirectionsHolder, '_stack_%s' % std) if keep_original_redirection: - wrap_buffer = True if not IS_PY2 and hasattr(redirect_to, 'buffer') else False + wrap_buffer = True if hasattr(redirect_to, 'buffer') else False new_std_instance = IORedirector(getattr(sys, std), redirect_to, wrap_buffer=wrap_buffer) setattr(sys, std, new_std_instance) else: @@ -224,7 +210,7 @@ def redirect_stream_to_pydb_io_messages(std): with _RedirectionsHolder._lock: redirect_to_name = '_pydevd_%s_redirect_' % (std,) if getattr(_RedirectionsHolder, redirect_to_name) is None: - wrap_buffer = True if not IS_PY2 else False + wrap_buffer = True original = getattr(sys, std) redirect_to = RedirectToPyDBIoMessages(1 if std == 'stdout' else 2, original, wrap_buffer) diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command.py index d00945c6..e00c8492 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command.py @@ -1,4 +1,4 @@ -from _pydevd_bundle.pydevd_constants import DebugInfoHolder, IS_PY2, \ +from _pydevd_bundle.pydevd_constants import DebugInfoHolder, \ get_global_debugger, GetGlobalDebugger, set_global_debugger # Keep for backward compatibility @UnusedImport from _pydevd_bundle.pydevd_utils import quote_smart as quote, to_string from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXIT @@ -72,13 +72,7 @@ class NetCommand(_BaseNetCommand): 
self.as_dict = as_dict text = json.dumps(as_dict) - if IS_PY2: - if isinstance(text, unicode): - text = text.encode('utf-8') - else: - assert isinstance(text, str) - else: - assert isinstance(text, str) + assert isinstance(text, str) if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1: self._show_debug_info(cmd_id, seq, text) @@ -93,15 +87,11 @@ class NetCommand(_BaseNetCommand): else: msg = '%s\t%s\t%s' % (cmd_id, seq, text) - if IS_PY2: - assert isinstance(msg, str) # i.e.: bytes - as_bytes = msg - else: - if isinstance(msg, str): - msg = msg.encode('utf-8') + if isinstance(msg, str): + msg = msg.encode('utf-8') - assert isinstance(msg, bytes) - as_bytes = msg + assert isinstance(msg, bytes) + as_bytes = msg self._as_bytes = as_bytes def send(self, sock): diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py index 58509a23..9d6afbe8 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py @@ -16,7 +16,7 @@ from _pydevd_bundle.pydevd_comm_constants import CMD_THREAD_CREATE, CMD_RETURN, CMD_THREAD_RESUME_SINGLE_NOTIFICATION, CMD_THREAD_KILL, CMD_STOP_ON_START, CMD_INPUT_REQUESTED, \ CMD_EXIT, CMD_STEP_INTO_COROUTINE, CMD_STEP_RETURN_MY_CODE, CMD_SMART_STEP_INTO, \ CMD_SET_FUNCTION_BREAK -from _pydevd_bundle.pydevd_constants import get_thread_id, dict_values, ForkSafeLock +from _pydevd_bundle.pydevd_constants import get_thread_id, ForkSafeLock from _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory from _pydevd_bundle.pydevd_utils import get_non_pydevd_threads @@ -26,11 +26,7 @@ from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_i from _pydevd_bundle import pydevd_frame_utils, pydevd_constants, pydevd_utils import linecache from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id - -try: - from StringIO import StringIO -except: - from io import StringIO +from io import StringIO class ModulesManager(object): @@ -86,7 +82,7 @@ class ModulesManager(object): :return list(Module) ''' with self._lock: - return dict_values(self._modules) + return list(self._modules.values()) class NetCommandFactoryJson(NetCommandFactory): diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_xml.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_xml.py index 0cb6e1cd..c0a8915a 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_xml.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_xml.py @@ -27,11 +27,7 @@ from pydevd_tracing import get_exception_traceback_str from _pydev_bundle._pydev_completer import completions_to_xml from _pydev_bundle import pydev_log from _pydevd_bundle.pydevd_frame_utils import FramesList - -try: - from StringIO import StringIO -except: - from io import StringIO +from io import StringIO if IS_IRONPYTHON: diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py index 75a52431..332439ee 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py @@ -11,7 +11,7 @@ from _pydevd_bundle.pydevd_breakpoints 
import get_exception_class from _pydevd_bundle.pydevd_comm import ( InternalEvaluateConsoleExpression, InternalConsoleGetCompletions, InternalRunCustomOperation, internal_get_next_statement_targets, internal_get_smart_step_into_variants) -from _pydevd_bundle.pydevd_constants import IS_PY3K, NEXT_VALUE_SEPARATOR, IS_WINDOWS, IS_PY2, NULL +from _pydevd_bundle.pydevd_constants import NEXT_VALUE_SEPARATOR, IS_WINDOWS, NULL from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXEC_EXPRESSION, CMD_AUTHENTICATE from _pydevd_bundle.pydevd_api import PyDevdAPI from _pydev_bundle.pydev_imports import StringIO @@ -118,9 +118,6 @@ class _PyDevCommandProcessor(object): return self.api.request_suspend_thread(py_db, text.strip()) def cmd_version(self, py_db, cmd_id, seq, text): - if IS_PY2 and isinstance(text, unicode): - text = text.encode('utf-8') - # Default based on server process (although ideally the IDE should # provide it). if IS_WINDOWS: @@ -670,9 +667,6 @@ class _PyDevCommandProcessor(object): def cmd_ignore_thrown_exception_at(self, py_db, cmd_id, seq, text): if text: replace = 'REPLACE:' # Not all 3.x versions support u'REPLACE:', so, doing workaround. - if not IS_PY3K: - replace = unicode(replace) # noqa - if text.startswith(replace): text = text[8:] py_db.filename_to_lines_where_exceptions_are_ignored.clear() @@ -697,9 +691,6 @@ class _PyDevCommandProcessor(object): def cmd_enable_dont_trace(self, py_db, cmd_id, seq, text): if text: true_str = 'true' # Not all 3.x versions support u'str', so, doing workaround. - if not IS_PY3K: - true_str = unicode(true_str) # noqa - mode = text.strip() == true_str pydevd_dont_trace.trace_filter(mode) diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py index d1449a40..86fa3c18 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py @@ -29,7 +29,7 @@ from _pydevd_bundle.pydevd_json_debug_options import _extract_debug_options, Deb from _pydevd_bundle.pydevd_net_command import NetCommand from _pydevd_bundle.pydevd_utils import convert_dap_log_message_to_expression, ScopeRequest from _pydevd_bundle.pydevd_constants import (PY_IMPL_NAME, DebugInfoHolder, PY_VERSION_STR, - PY_IMPL_VERSION_STR, IS_64BIT_PROCESS, IS_PY2) + PY_IMPL_VERSION_STR, IS_64BIT_PROCESS) from _pydevd_bundle.pydevd_trace_dispatch import USING_CYTHON from _pydevd_frame_eval.pydevd_frame_eval_main import USING_FRAME_EVAL from _pydevd_bundle.pydevd_comm import internal_get_step_in_targets_json @@ -447,9 +447,6 @@ class PyDevJsonCommandProcessor(object): new_watch_dirs = set() for w in watch_dirs: try: - if IS_PY2 and isinstance(w, unicode): - w = w.encode(getfilesystemencoding()) - new_watch_dirs.add(pydevd_file_utils.get_path_with_real_case(pydevd_file_utils.absolute_path(w))) except Exception: pydev_log.exception('Error adding watch dir: %s', w) diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py index 7c5e96cf..d981500e 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py @@ -1,14 +1,11 @@ from _pydev_bundle import pydev_log from _pydevd_bundle.pydevd_utils import hasattr_checked, DAPGrouper, Timer -try: - import StringIO -except: - import io as StringIO +from io 
import StringIO import traceback from os.path import basename from functools import partial -from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, xrange, IS_PY36_OR_GREATER, \ +from _pydevd_bundle.pydevd_constants import xrange, IS_PY36_OR_GREATER, \ MethodWrapperType, RETURN_VALUES_DICT, DebugInfoHolder, IS_PYPY, GENERATED_LEN_ATTR_NAME from _pydevd_bundle.pydevd_safe_repr import SafeRepr @@ -75,7 +72,7 @@ class DefaultResolver: else: dct = self._get_jy_dictionary(obj)[0] - lst = sorted(dict_iter_items(dct), key=lambda tup: sorted_attributes_key(tup[0])) + lst = sorted(dct.items(), key=lambda tup: sorted_attributes_key(tup[0])) if used___dict__: eval_name = '.__dict__[%s]' else: @@ -158,7 +155,7 @@ class DefaultResolver: names = [] if not names: if hasattr_checked(var, '__dict__'): - names = dict_keys(var.__dict__) + names = list(var.__dict__) used___dict__ = True return names, used___dict__ @@ -202,7 +199,7 @@ class DefaultResolver: continue except: # if some error occurs getting it, let's put it to the user. - strIO = StringIO.StringIO() + strIO = StringIO() traceback.print_exc(file=strIO) attr = strIO.getvalue() @@ -251,7 +248,7 @@ class DictResolver: sort_keys = not IS_PY36_OR_GREATER - def resolve(self, dict, key): + def resolve(self, dct, key): if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR): return None @@ -259,14 +256,14 @@ class DictResolver: # we have to treat that because the dict resolver is also used to directly resolve the global and local # scopes (which already have the items directly) try: - return dict[key] + return dct[key] except: - return getattr(dict, key) + return getattr(dct, key) # ok, we have to iterate over the items to find the one that matches the id, because that's the only way # to actually find the reference from the string we have before. expected_id = int(key.split('(')[-1][:-1]) - for key, val in dict_iter_items(dict): + for key, val in dct.items(): if id(key) == expected_id: return val @@ -299,7 +296,7 @@ class DictResolver: found_representations = set() - for key, val in dict_iter_items(dct): + for key, val in dct.items(): i += 1 key_as_str = self.key_to_str(key, fmt) @@ -334,11 +331,11 @@ class DictResolver: ret.append((GENERATED_LEN_ATTR_NAME, len(dct), partial(_apply_evaluate_name, evaluate_name='len(%s)'))) return ret - def get_dictionary(self, dict): + def get_dictionary(self, dct): ret = self.init_dict() i = 0 - for key, val in dict_iter_items(dict): + for key, val in dct.items(): i += 1 # we need to add the id because otherwise we cannot find the real object to get its contents later on. key = '%s (%s)' % (self.key_to_str(key), id(key)) @@ -348,9 +345,9 @@ class DictResolver: break # in case if the class extends built-in type and has some additional fields - additional_fields = defaultResolver.get_dictionary(dict) + additional_fields = defaultResolver.get_dictionary(dct) ret.update(additional_fields) - ret[GENERATED_LEN_ATTR_NAME] = len(dict) + ret[GENERATED_LEN_ATTR_NAME] = len(dct) return ret @@ -556,15 +553,15 @@ class JyArrayResolver: #======================================================================================================================= class MultiValueDictResolver(DictResolver): - def resolve(self, dict, key): + def resolve(self, dct, key): if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR): return None # ok, we have to iterate over the items to find the one that matches the id, because that's the only way # to actually find the reference from the string we have before. 
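        # Hedged illustration (added for clarity; the key and id values are
        # invented): get_dictionary() names each entry '%s (%s)' % (key, id(key)),
        # e.g. "'spam' (140245)", because the display string alone cannot be
        # mapped back to the key object. The next line recovers the id:
        #     int("'spam' (140245)".split('(')[-1][:-1]) == 140245
        # and the loop below then matches entries by identity of the key.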
expected_id = int(key.split('(')[-1][:-1]) - for key in dict_keys(dict): - val = dict.getlist(key) + for key in list(dct.keys()): + val = dct.getlist(key) if id(key) == expected_id: return val diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py index 56b384a6..8b255c31 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py @@ -4,7 +4,7 @@ # Gotten from ptvsd for supporting the format expected there. import sys -from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY36_OR_GREATER +from _pydevd_bundle.pydevd_constants import IS_PY36_OR_GREATER import locale from _pydev_bundle import pydev_log @@ -93,10 +93,7 @@ class SafeRepr(object): Returns bytes encoded as utf-8 on py2 and str on py3. ''' try: - if IS_PY2: - return ''.join((x.encode('utf-8') if isinstance(x, unicode) else x) for x in self._repr(obj, 0)) - else: - return ''.join(self._repr(obj, 0)) + return ''.join(self._repr(obj, 0)) except Exception: try: return 'An exception was raised: %r' % sys.exc_info()[1] @@ -387,56 +384,11 @@ class SafeRepr(object): # you are using the wrong class. left_count, right_count = max(1, int(2 * limit / 3)), max(1, int(limit / 3)) # noqa - if IS_PY2 and isinstance(obj_repr, self.bytes): - # If we can convert to unicode before slicing, that's better (but don't do - # it if it's not possible as we may be dealing with actual binary data). - - obj_repr = self._bytes_as_unicode_if_possible(obj_repr) - if isinstance(obj_repr, unicode): - # Deal with high-surrogate leftovers on Python 2. - try: - if left_count > 0 and unichr(0xD800) <= obj_repr[left_count - 1] <= unichr(0xDBFF): - left_count -= 1 - except ValueError: - # On Jython unichr(0xD800) will throw an error: - # ValueError: unichr() arg is a lone surrogate in range (0xD800, 0xDFFF) (Jython UTF-16 encoding) - # Just ignore it in this case. - pass - - start = obj_repr[:left_count] - - # Note: yielding unicode is fine (it'll be properly converted to utf-8 if needed). - yield start - yield '...' - - # Deal with high-surrogate leftovers on Python 2. - try: - if right_count > 0 and unichr(0xD800) <= obj_repr[-right_count - 1] <= unichr(0xDBFF): - right_count -= 1 - except ValueError: - # On Jython unichr(0xD800) will throw an error: - # ValueError: unichr() arg is a lone surrogate in range (0xD800, 0xDFFF) (Jython UTF-16 encoding) - # Just ignore it in this case. - pass - - yield obj_repr[-right_count:] - return - else: - # We can't decode it (binary string). Use repr() of bytes. - obj_repr = repr(obj_repr) - yield obj_repr[:left_count] yield '...' yield obj_repr[-right_count:] def _convert_to_unicode_or_bytes_repr(self, obj_repr): - if IS_PY2 and isinstance(obj_repr, self.bytes): - obj_repr = self._bytes_as_unicode_if_possible(obj_repr) - if isinstance(obj_repr, self.bytes): - # If we haven't been able to decode it this means it's some binary data - # we can't make sense of, so, we need its repr() -- otherwise json - # encoding may break later on. 
- obj_repr = repr(obj_repr) return obj_repr def _bytes_as_unicode_if_possible(self, obj_repr): diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_signature.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_signature.py index dc1c9846..bf990f9c 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_signature.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_signature.py @@ -10,7 +10,7 @@ else: import os from _pydevd_bundle.pydevd_comm import CMD_SIGNATURE_CALL_TRACE, NetCommand from _pydevd_bundle import pydevd_xml -from _pydevd_bundle.pydevd_constants import xrange, dict_iter_items +from _pydevd_bundle.pydevd_constants import xrange from _pydevd_bundle.pydevd_utils import get_clsname_for_code @@ -62,7 +62,7 @@ def get_type_of_value(value, ignore_module_name=('__main__', '__builtin__', 'bui if class_name == 'dict': class_name = 'Dict' if len(value) > 0 and recursive: - for (k, v) in dict_iter_items(value): + for (k, v) in value.items(): class_name += '[%s, %s]' % (get_type_of_value(k, recursive=recursive), get_type_of_value(v, recursive=recursive)) break diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_source_mapping.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_source_mapping.py index 9f6098c1..54553071 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_source_mapping.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_source_mapping.py @@ -1,5 +1,5 @@ import bisect -from _pydevd_bundle.pydevd_constants import dict_items, NULL, KeyifyList +from _pydevd_bundle.pydevd_constants import NULL, KeyifyList import pydevd_file_utils @@ -86,7 +86,7 @@ class SourceMapping(object): try: return self._cache[key] except KeyError: - for _, mapping in dict_items(self._mappings_to_server): + for _, mapping in list(self._mappings_to_server.items()): for map_entry in mapping: if map_entry.runtime_source == runtime_source_filename: # if map_entry.contains_runtime_line(lineno): # matches line range @@ -107,7 +107,7 @@ class SourceMapping(object): try: return self._cache[key] except KeyError: - for _absolute_normalized_filename, mapping in dict_items(self._mappings_to_server): + for _absolute_normalized_filename, mapping in list(self._mappings_to_server.items()): for map_entry in mapping: if map_entry.runtime_source == runtime_source_filename: self._cache[key] = True diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_stackless.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_stackless.py index c6901408..83ea8ff0 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_stackless.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_stackless.py @@ -6,7 +6,6 @@ import sys from _pydevd_bundle.pydevd_comm import get_global_debugger from _pydevd_bundle.pydevd_constants import call_only_once from _pydev_imps._pydev_saved_modules import threading -from _pydevd_bundle.pydevd_constants import dict_items from _pydevd_bundle.pydevd_custom_frames import update_custom_frame, remove_custom_frame, add_custom_frame import stackless # @UnresolvedImport from _pydev_bundle import pydev_log @@ -201,7 +200,7 @@ def _schedule_callback(prev, next): register_tasklet_info(prev) try: - for tasklet_ref, tasklet_info in dict_items(_weak_tasklet_registered_to_info): # Make sure it's a copy! + for tasklet_ref, tasklet_info in list(_weak_tasklet_registered_to_info.items()): # Make sure it's a copy! tasklet = tasklet_ref() if tasklet is None or not tasklet.alive: # Garbage-collected already! 
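# Editorial sketch (not part of the patch) of why both stackless hunks wrap the
# registry in list(...): Python 2's dict_items() returned a materialized list,
# so tasklets could be unregistered while iterating, but Python 3's dict.items()
# is a live view, and mutating the dict during iteration raises RuntimeError.
def _demo_snapshot_iteration():
    registry = {1: 'alive', 2: 'dead', 3: 'alive'}
    for key, state in list(registry.items()):  # snapshot first, as above
        if state == 'dead':
            del registry[key]  # safe only because of the list() snapshot
    return registry  # {1: 'alive', 3: 'alive'}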
@@ -276,7 +275,7 @@ if not hasattr(stackless.tasklet, "trace_function"): register_tasklet_info(prev) try: - for tasklet_ref, tasklet_info in dict_items(_weak_tasklet_registered_to_info): # Make sure it's a copy! + for tasklet_ref, tasklet_info in list(_weak_tasklet_registered_to_info.items()): # Make sure it's a copy! tasklet = tasklet_ref() if tasklet is None or not tasklet.alive: # Garbage-collected already! diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_suspended_frames.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_suspended_frames.py index 374bae6e..53337025 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_suspended_frames.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_suspended_frames.py @@ -1,8 +1,8 @@ from contextlib import contextmanager import sys -from _pydevd_bundle.pydevd_constants import get_frame, dict_items, RETURN_VALUES_DICT, \ - dict_iter_items, ForkSafeLock, GENERATED_LEN_ATTR_NAME, silence_warnings_decorator +from _pydevd_bundle.pydevd_constants import get_frame, RETURN_VALUES_DICT, \ + ForkSafeLock, GENERATED_LEN_ATTR_NAME, silence_warnings_decorator from _pydevd_bundle.pydevd_xml import get_variable_details, get_type from _pydev_bundle.pydev_override import overrides from _pydevd_bundle.pydevd_resolver import sorted_attributes_key, TOO_LARGE_ATTR, get_var_scope @@ -169,8 +169,7 @@ class _ObjectVariable(_AbstractVariable): else: # If there's no special implementation, the default is sorting the keys. dct = resolver.get_dictionary(self.value) - lst = dict_items(dct) - lst.sort(key=lambda tup: sorted_attributes_key(tup[0])) + lst = sorted(dct.items(), key=lambda tup: sorted_attributes_key(tup[0])) # No evaluate name in this case. lst = [(key, value, None) for (key, value) in lst] @@ -276,7 +275,7 @@ class _FrameVariable(_AbstractVariable): else: raise AssertionError('Unexpected scope: %s' % (scope,)) - lst, group_entries = self._group_entries([(x[0], x[1], None) for x in dict_items(dct) if x[0] != '_pydev_stop_at_break'], handle_return_values=True) + lst, group_entries = self._group_entries([(x[0], x[1], None) for x in list(dct.items()) if x[0] != '_pydev_stop_at_break'], handle_return_values=True) group_variables = [] for key, val, _ in group_entries: @@ -288,7 +287,7 @@ class _FrameVariable(_AbstractVariable): for key, val, _ in lst: is_return_value = key == RETURN_VALUES_DICT if is_return_value: - for return_key, return_value in dict_iter_items(val): + for return_key, return_value in val.items(): variable = _ObjectVariable( self.py_db, return_key, return_value, self._register_variable, is_return_value, '%s[%r]' % (key, return_key), frame=self.frame) children_variables.append(variable) @@ -452,7 +451,7 @@ class SuspendedFramesManager(object): if tracker is not None: return tracker - for _thread_id, tracker in dict_iter_items(self._thread_id_to_tracker): + for _thread_id, tracker in self._thread_id_to_tracker.items(): try: tracker.get_variable(variable_reference) except KeyError: diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_traceproperty.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_traceproperty.py index 2bfebc4d..2f38e4be 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_traceproperty.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_traceproperty.py @@ -1,7 +1,6 @@ '''For debug purpose we are replacing actual builtin property by the debug property ''' from _pydevd_bundle.pydevd_comm import get_global_debugger -from _pydevd_bundle.pydevd_constants import 
DebugInfoHolder, IS_PY2 from _pydev_bundle import pydev_log @@ -12,18 +11,11 @@ def replace_builtin_property(new_property=None): if new_property is None: new_property = DebugProperty original = property - if IS_PY2: - try: - import __builtin__ - __builtin__.__dict__['property'] = new_property - except: - pydev_log.exception() # @Reimport - else: - try: - import builtins # Python 3.0 does not have the __builtin__ module @UnresolvedImport - builtins.__dict__['property'] = new_property - except: - pydev_log.exception() # @Reimport + try: + import builtins + builtins.__dict__['property'] = new_property + except: + pydev_log.exception() # @Reimport return original diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_utils.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_utils.py index 47ce11a7..5a5ae8af 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_utils.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_utils.py @@ -9,15 +9,12 @@ import os import ctypes from importlib import import_module -try: - from urllib import quote -except: - from urllib.parse import quote # @UnresolvedImport +from urllib.parse import quote # @UnresolvedImport import time import inspect import sys -from _pydevd_bundle.pydevd_constants import IS_PY3K, USE_CUSTOM_SYS_CURRENT_FRAMES, IS_PYPY, SUPPORT_GEVENT, \ +from _pydevd_bundle.pydevd_constants import USE_CUSTOM_SYS_CURRENT_FRAMES, IS_PYPY, SUPPORT_GEVENT, \ GEVENT_SUPPORT_NOT_SET_MSG, GENERATED_LEN_ATTR_NAME, PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT, \ get_global_debugger from _pydev_imps._pydev_saved_modules import threading @@ -94,19 +91,12 @@ def compare_object_attrs_key(x): return (-1, to_string(x)) -if IS_PY3K: - - def is_string(x): - return isinstance(x, str) - -else: - - def is_string(x): - return isinstance(x, basestring) +def is_string(x): + return isinstance(x, str) def to_string(x): - if is_string(x): + if isinstance(x, str): return x else: return str(x) @@ -117,18 +107,8 @@ def print_exc(): traceback.print_exc() -if IS_PY3K: - - def quote_smart(s, safe='/'): - return quote(s, safe) - -else: - - def quote_smart(s, safe='/'): - if isinstance(s, unicode): - s = s.encode('utf-8') - - return quote(s, safe) +def quote_smart(s, safe='/'): + return quote(s, safe) def get_clsname_for_code(code, frame): diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py index a50f302e..12d35d46 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py @@ -2,26 +2,19 @@ resolution/conversion to XML. 
""" import pickle -from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, IS_PY2, \ - iter_chars, silence_warnings_decorator, dict_iter_items +from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, \ + iter_chars, silence_warnings_decorator from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml from _pydev_bundle import pydev_log -import codecs -import os import functools from _pydevd_bundle.pydevd_thread_lifecycle import resume_threads, mark_thread_suspended, suspend_all_threads from _pydevd_bundle.pydevd_comm_constants import CMD_SET_BREAK -try: - from StringIO import StringIO -except ImportError: - from io import StringIO import sys # @Reimport from _pydev_imps._pydev_saved_modules import threading -import traceback -from _pydevd_bundle import pydevd_save_locals, pydevd_timeout, pydevd_constants, pydevd_utils +from _pydevd_bundle import pydevd_save_locals, pydevd_timeout, pydevd_constants from _pydev_bundle.pydev_imports import Exec, execfile from _pydevd_bundle.pydevd_utils import to_string @@ -244,13 +237,6 @@ def _expression_to_evaluate(expression): else: expression = u''.join(new_lines) - if IS_PY2 and isinstance(expression, unicode): - # In Python 2 we need to compile with bytes and not unicode (otherwise it'd use - # the default encoding which could be ascii). - # See https://github.com/microsoft/ptvsd/issues/1864 and https://bugs.python.org/issue18870 - # for why we're using the utf-8 bom. - # i.e.: ... if an utf-8 bom is present, it is considered utf-8 in eval/exec. - expression = codecs.BOM_UTF8 + expression.encode('utf-8') return expression @@ -267,9 +253,6 @@ def eval_in_context(expression, globals, locals=None): # Ok, we have the initial error message, but let's see if we're dealing with a name mangling error... try: - if IS_PY2 and isinstance(expression, unicode): - expression = expression.encode('utf-8') - if '.__' in expression: # Try to handle '__' name mangling (for simple cases such as self.__variable.__another_var). split = expression.split('.') @@ -395,7 +378,7 @@ def _update_globals_and_locals(updated_globals, initial_globals, frame): # one that enabled creating and using variables during the same evaluation. 
assert updated_globals is not None changed = False - for key, val in dict_iter_items(updated_globals): + for key, val in updated_globals.items(): if initial_globals.get(key) is not val: changed = True frame.f_locals[key] = val @@ -466,10 +449,7 @@ def evaluate_expression(py_db, frame, expression, is_exec): updated_locals = None try: - if IS_PY2 and isinstance(expression, unicode): - expression = expression.replace(u'@LINE@', u'\n') - else: - expression = expression.replace('@LINE@', '\n') + expression = expression.replace('@LINE@', '\n') if is_exec: try: @@ -488,11 +468,6 @@ def evaluate_expression(py_db, frame, expression, is_exec): else: result = eval(compiled, updated_globals, updated_locals) if result is not None: # Only print if it's not None (as python does) - if IS_PY2 and isinstance(result, unicode): - encoding = sys.stdout.encoding - if not encoding: - encoding = os.environ.get('PYTHONIOENCODING', 'utf-8') - result = result.encode(encoding, 'replace') sys.stdout.write('%s\n' % (result,)) return diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_xml.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_xml.py index 6460d011..85c51a61 100644 --- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_xml.py +++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_xml.py @@ -2,12 +2,11 @@ from _pydev_bundle import pydev_log from _pydevd_bundle import pydevd_extension_utils from _pydevd_bundle import pydevd_resolver import sys -from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, IS_PY3K, \ - BUILTINS_MODULE_NAME, MAXIMUM_VARIABLE_REPRESENTATION_SIZE, RETURN_VALUES_DICT, LOAD_VALUES_ASYNC, \ - DEFAULT_VALUE +from _pydevd_bundle.pydevd_constants import BUILTINS_MODULE_NAME, MAXIMUM_VARIABLE_REPRESENTATION_SIZE, \ + RETURN_VALUES_DICT, LOAD_VALUES_ASYNC, DEFAULT_VALUE from _pydev_bundle.pydev_imports import quote from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider -from _pydevd_bundle.pydevd_utils import isinstance_checked, hasattr_checked, DAPGrouper, Timer +from _pydevd_bundle.pydevd_utils import isinstance_checked, hasattr_checked, DAPGrouper from _pydevd_bundle.pydevd_resolver import get_var_scope try: @@ -253,25 +252,21 @@ def should_evaluate_full_value(val): def return_values_from_dict_to_xml(return_dict): - res = "" - for name, val in dict_iter_items(return_dict): - res += var_to_xml(val, name, additional_in_xml=' isRetVal="True"') - return res + res = [] + for name, val in return_dict.items(): + res.append(var_to_xml(val, name, additional_in_xml=' isRetVal="True"')) + return ''.join(res) def frame_vars_to_xml(frame_f_locals, hidden_ns=None): """ dumps frame variables to XML """ - xml = "" + xml = [] - keys = dict_keys(frame_f_locals) - if hasattr(keys, 'sort'): - keys.sort() # Python 3.0 does not have it - else: - keys = sorted(keys) # Jython 2.1 does not have it + keys = sorted(frame_f_locals) - return_values_xml = '' + return_values_xml = [] for k in keys: try: @@ -282,20 +277,21 @@ def frame_vars_to_xml(frame_f_locals, hidden_ns=None): continue if k == RETURN_VALUES_DICT: - for name, val in dict_iter_items(v): - return_values_xml += var_to_xml(val, name, additional_in_xml=' isRetVal="True"') + for name, val in v.items(): + return_values_xml.append(var_to_xml(val, name, additional_in_xml=' isRetVal="True"')) else: if hidden_ns is not None and k in hidden_ns: - xml += var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"', - evaluate_full_value=eval_full_val) + xml.append(var_to_xml(v, str(k), 
additional_in_xml=' isIPythonHidden="True"', + evaluate_full_value=eval_full_val)) else: - xml += var_to_xml(v, str(k), evaluate_full_value=eval_full_val) + xml.append(var_to_xml(v, str(k), evaluate_full_value=eval_full_val)) except Exception: pydev_log.exception("Unexpected error, recovered safely.") # Show return values as the first entry. - return return_values_xml + xml + return_values_xml.extend(xml) + return ''.join(return_values_xml) def get_variable_details(val, evaluate_full_value=True, to_string=None): @@ -357,12 +353,8 @@ def get_variable_details(val, evaluate_full_value=True, to_string=None): # fix to work with unicode values try: - if not IS_PY3K: - if value.__class__ == unicode: # @UndefinedVariable - value = value.encode('utf-8', 'replace') - else: - if value.__class__ == bytes: - value = value.decode('utf-8', 'replace') + if value.__class__ == bytes: + value = value.decode('utf-8', 'replace') except TypeError: pass diff --git a/src/debugpy/_vendored/pydevd/build_tools/build_binaries_osx.py b/src/debugpy/_vendored/pydevd/build_tools/build_binaries_osx.py index 795b2d4b..a3e376ff 100644 --- a/src/debugpy/_vendored/pydevd/build_tools/build_binaries_osx.py +++ b/src/debugpy/_vendored/pydevd/build_tools/build_binaries_osx.py @@ -7,8 +7,6 @@ import sys miniconda64_envs = os.getenv('MINICONDA64_ENVS') python_installations = [ - r'%s/py26_64/bin/python' % miniconda64_envs, - r'%s/py27_64/bin/python' % miniconda64_envs, r'%s/py34_64/bin/python' % miniconda64_envs, r'%s/py35_64/bin/python' % miniconda64_envs, r'%s/py36_64/bin/python' % miniconda64_envs, diff --git a/src/debugpy/_vendored/pydevd/build_tools/build_binaries_windows.py b/src/debugpy/_vendored/pydevd/build_tools/build_binaries_windows.py index d9337c9a..52b20567 100644 --- a/src/debugpy/_vendored/pydevd/build_tools/build_binaries_windows.py +++ b/src/debugpy/_vendored/pydevd/build_tools/build_binaries_windows.py @@ -16,17 +16,18 @@ import sys miniconda_envs = os.getenv('MINICONDA_ENVS', r'C:\bin\Miniconda3\envs') python_installations = [ - r'%s\py27_32\python.exe' % miniconda_envs, + r'%s\py36_32\python.exe' % miniconda_envs, r'%s\py37_32\python.exe' % miniconda_envs, r'%s\py38_32\python.exe' % miniconda_envs, r'%s\py39_32\python.exe' % miniconda_envs, + r'%s\py310_32\python.exe' % miniconda_envs, - r'%s\py27_64\python.exe' % miniconda_envs, r'%s\py36_64\python.exe' % miniconda_envs, r'%s\py37_64\python.exe' % miniconda_envs, r'%s\py38_64\python.exe' % miniconda_envs, r'%s\py39_64\python.exe' % miniconda_envs, + r'%s\py310_64\python.exe' % miniconda_envs, ] root_dir = os.path.dirname(os.path.dirname(__file__)) @@ -69,12 +70,14 @@ def main(): python_install, os.path.join(root_dir, 'build_tools', 'build.py'), '--no-remove-binaries', '--target-pyd-name=%s' % new_name, '--force-cython'] if i != 0: args.append('--no-regenerate-files') - version_number = extract_version(python_install) - if version_number.startswith('36') or version_number.startswith('37'): - name_frame_eval = 'pydevd_frame_evaluator_%s_%s' % (sys.platform, extract_version(python_install)) - args.append('--target-pyd-frame-eval=%s' % name_frame_eval) + name_frame_eval = 'pydevd_frame_evaluator_%s_%s' % (sys.platform, extract_version(python_install)) + args.append('--target-pyd-frame-eval=%s' % name_frame_eval) print('Calling: %s' % (' '.join(args))) - subprocess.check_call(args) + + env = os.environ.copy() + python_exe_dir = os.path.dirname(python_install) + env['PATH'] = env['PATH'] + ';' + os.path.join(python_exe_dir, 'DLLs') + ';' + 
os.path.join(python_exe_dir, 'Library', 'bin') + subprocess.check_call(args, env=env) if __name__ == '__main__': diff --git a/src/debugpy/_vendored/pydevd/build_tools/generate_code.py b/src/debugpy/_vendored/pydevd/build_tools/generate_code.py index 8245b1ec..44c8ead2 100644 --- a/src/debugpy/_vendored/pydevd/build_tools/generate_code.py +++ b/src/debugpy/_vendored/pydevd/build_tools/generate_code.py @@ -103,8 +103,6 @@ def generate_dont_trace_files(): # DO NOT edit manually! # DO NOT edit manually! -from _pydevd_bundle.pydevd_constants import IS_PY3K - LIB_FILE = 1 PYDEV_FILE = 2 @@ -128,14 +126,13 @@ DONT_TRACE = { %(pydev_files)s } -if IS_PY3K: - # if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716) - DONT_TRACE['io.py'] = LIB_FILE +# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716) +DONT_TRACE['io.py'] = LIB_FILE - # Don't trace common encodings too - DONT_TRACE['cp1252.py'] = LIB_FILE - DONT_TRACE['utf_8.py'] = LIB_FILE - DONT_TRACE['codecs.py'] = LIB_FILE +# Don't trace common encodings too +DONT_TRACE['cp1252.py'] = LIB_FILE +DONT_TRACE['utf_8.py'] = LIB_FILE +DONT_TRACE['codecs.py'] = LIB_FILE ''' pydev_files = [] diff --git a/src/debugpy/_vendored/pydevd/build_tools/pydevd_release_process.txt b/src/debugpy/_vendored/pydevd/build_tools/pydevd_release_process.txt index ae0ea5b1..4b85d63e 100644 --- a/src/debugpy/_vendored/pydevd/build_tools/pydevd_release_process.txt +++ b/src/debugpy/_vendored/pydevd/build_tools/pydevd_release_process.txt @@ -9,13 +9,6 @@ __version_info__ in pydevd.py set CONDA_FORCE_32BIT=1 -conda create -y -f -n py27_32 python=2.7 cython numpy nose ipython pip -activate py27_32 -pip install "django>=1.7,<1.8" -pip install -U "setuptools>=0.9" -pip install -U "pip>=1.4" "wheel>=0.21" twine -conda deactivate - conda create -y -f -n py36_32 python=3.6 cython numpy nose ipython pip activate py36_32 pip install "django>=1.9" @@ -44,17 +37,16 @@ pip install -U "setuptools>=0.9" pip install -U "pip>=1.4" "wheel>=0.21" twine conda deactivate - -set CONDA_FORCE_32BIT= - -conda create -y -f -n py27_64 python=2.7 cython numpy nose ipython pip -activate py27_64 -pip install "django>=1.7,<1.8" +conda create -y -f -n py310_32 python=3.10 cython pip +activate py310_32 +pip install "django>=1.9" pip install -U "setuptools>=0.9" pip install -U "pip>=1.4" "wheel>=0.21" twine conda deactivate +set CONDA_FORCE_32BIT= + conda create -y -f -n py36_64 python=3.6 cython numpy nose ipython pip activate py36_64 pip install "django>=1.9" @@ -83,50 +75,57 @@ pip install -U "setuptools>=0.9" pip install -U "pip>=1.4" "wheel>=0.21" twine conda deactivate +conda create -y -f -n py310_64 python=3.10 cython pip +activate py310_64 +pip install "django>=1.9" +pip install -U "setuptools>=0.9" +pip install -U "pip>=1.4" "wheel>=0.21" twine +conda deactivate + ### UPDATE CYTHON set CONDA_FORCE_32BIT=1 -activate py27_32 -conda update -y cython -conda deactivate - activate py36_32 -conda update -y cython +pip install cython==0.29.28 conda deactivate activate py37_32 -conda update -y cython +pip install cython==0.29.28 conda deactivate activate py38_32 -conda update -y cython +pip install cython==0.29.28 conda deactivate activate py39_32 -conda update -y cython +pip install cython==0.29.28 +conda deactivate + +activate py310_32 +pip install cython==0.29.28 conda deactivate set CONDA_FORCE_32BIT= -activate py27_64 -conda update -y cython -conda deactivate - activate py36_64 -conda update -y cython +pip install 
cython==0.29.28 conda deactivate activate py37_64 -conda update -y cython +pip install cython==0.29.28 conda deactivate activate py38_64 -conda update -y cython +pip install cython==0.29.28 conda deactivate activate py39_64 -conda update -y cython +pip install cython==0.29.28 +conda deactivate + +activate py310_64 +pip install cython==0.29.28 conda deactivate 4. Regenerate the .pyx and .c @@ -159,11 +158,6 @@ C:\bin\Python38-32\python build_tools\build_binaries_windows.py rm dist/pydevd* -activate py27_32 -python setup.py sdist bdist_wheel -conda deactivate -dir dist - activate py36_32 python setup.py sdist bdist_wheel conda deactivate @@ -184,12 +178,7 @@ python setup.py sdist bdist_wheel conda deactivate dir dist -activate py27_64 -python setup.py sdist bdist_wheel -conda deactivate -dir dist - -activate py35_64 +activate py310_32 python setup.py sdist bdist_wheel conda deactivate dir dist @@ -214,9 +203,14 @@ python setup.py sdist bdist_wheel conda deactivate dir dist +activate py310_64 +python setup.py sdist bdist_wheel +conda deactivate +dir dist + # Note: uploading with twine gives an error in the end, but apparently it works (check final result in pypi). twine upload dist/pydevd* -git tag pydev_debugger_2_3_0 -a -m "PyDev.Debugger 2.3.0" +git tag pydev_debugger_2_8_0 -a -m "PyDev.Debugger 2.8.0" git push --tags diff --git a/src/debugpy/_vendored/pydevd/build_tools/rename_pep8.py b/src/debugpy/_vendored/pydevd/build_tools/rename_pep8.py index 6e545da6..644ac11a 100644 --- a/src/debugpy/_vendored/pydevd/build_tools/rename_pep8.py +++ b/src/debugpy/_vendored/pydevd/build_tools/rename_pep8.py @@ -8,12 +8,15 @@ import names_to_rename _CAMEL_RE = re.compile(r'(?<=[a-z])([A-Z])') _CAMEL_DEF_RE = re.compile(r'(def )((([A-Z0-9]+|[a-z0-9])[a-z][a-z0-9]*[A-Z]|[a-z0-9]*[A-Z][A-Z0-9]*[a-z])[A-Za-z0-9]*)') + def _normalize(name): return _CAMEL_RE.sub(lambda x: '_' + x.group(1).lower(), name).lower() + def find_matches_in_contents(contents): return [x[1] for x in re.findall(_CAMEL_DEF_RE, contents)] + def iter_files_in_dir(dirname): for root, dirs, files in os.walk(dirname): for name in ('pydevd_attach_to_process', '.git', 'stubs', 'pydev_ipython', 'third_party', 'pydev_ipython'): @@ -29,12 +32,14 @@ def iter_files_in_dir(dirname): yield path, initial_contents + def find_matches(): found = set() for path, initial_contents in iter_files_in_dir(os.path.dirname(os.path.dirname(__file__))): found.update(find_matches_in_contents(initial_contents)) - print '\n'.join(sorted(found)) - print 'Total', len(found) + print('\n'.join(sorted(found))) + print('Total', len(found)) + def substitute_contents(re_name_to_new_val, initial_contents): contents = initial_contents @@ -42,13 +47,14 @@ def substitute_contents(re_name_to_new_val, initial_contents): contents = re.sub(key, val, contents) return contents + def make_replace(): re_name_to_new_val = load_re_to_new_val(names_to_rename.NAMES) # traverse root directory, and list directories as dirs and files as files for path, initial_contents in iter_files_in_dir(os.path.dirname(os.path.dirname(__file__))): contents = substitute_contents(re_name_to_new_val, initial_contents) if contents != initial_contents: - print 'Changed something at: %s' % (path,) + print('Changed something at: %s' % (path,)) for val in re_name_to_new_val.itervalues(): # Check in initial contents to see if it already existed! 
@@ -64,9 +70,10 @@ def load_re_to_new_val(names): for n in names.splitlines(): n = n.strip() if not n.startswith('#') and n: - name_to_new_val[r'\b'+n+r'\b'] = _normalize(n) + name_to_new_val[r'\b' + n + r'\b'] = _normalize(n) return name_to_new_val + def test(): assert _normalize('RestoreSysSetTraceFunc') == 'restore_sys_set_trace_func' assert _normalize('restoreSysSetTraceFunc') == 'restore_sys_set_trace_func' @@ -116,6 +123,7 @@ dict_pop dict_values ''' + if __name__ == '__main__': # find_matches() make_replace() diff --git a/src/debugpy/_vendored/pydevd/interpreterInfo.py b/src/debugpy/_vendored/pydevd/interpreterInfo.py index 40c4ebe7..de6aa003 100644 --- a/src/debugpy/_vendored/pydevd/interpreterInfo.py +++ b/src/debugpy/_vendored/pydevd/interpreterInfo.py @@ -15,12 +15,15 @@ import sys try: import os.path + def fully_normalize_path(path): '''fixes the path so that the format of the path really reflects the directories in the system ''' return os.path.normpath(path) + join = os.path.join except: # ImportError or AttributeError. + # See: http://stackoverflow.com/questions/10254353/error-while-installing-jython-for-pydev def fully_normalize_path(path): '''fixes the path so that the format of the path really reflects the directories in the system @@ -32,7 +35,6 @@ except: # ImportError or AttributeError. return a + b return a + '/' + b - IS_PYTHON_3_ONWARDS = 0 try: @@ -50,11 +52,7 @@ except: if sys.platform == "cygwin": - try: - import ctypes # use from the system if available - except ImportError: - sys.path.append(join(sys.path[0], 'third_party/wrapped_for_pydev')) - import ctypes + import ctypes def native_path(path): MAX_PATH = 512 # On cygwin NT, its 260 lately, but just need BIG ENOUGH buffer @@ -64,17 +62,17 @@ if sys.platform == "cygwin": path = fully_normalize_path(path) path = tobytes(path) CCP_POSIX_TO_WIN_A = 0 - cygwin1dll = ctypes.cdll.LoadLibrary( 'cygwin1.dll' ) + cygwin1dll = ctypes.cdll.LoadLibrary('cygwin1.dll') cygwin1dll.cygwin_conv_path(CCP_POSIX_TO_WIN_A, path, retval, MAX_PATH) return retval.value else: + def native_path(path): return fully_normalize_path(path) - def __getfilesystemencoding(): ''' Note: there's a copy of this method in _pydev_filesystem_encoding.py @@ -100,11 +98,12 @@ def __getfilesystemencoding(): return 'mbcs' return 'utf-8' + def getfilesystemencoding(): try: ret = __getfilesystemencoding() - #Check if the encoding is actually there to be used! + # Check if the encoding is actually there to be used! 
if hasattr('', 'encode'): ''.encode(ret) if hasattr('', 'decode'): @@ -114,6 +113,7 @@ def getfilesystemencoding(): except: return 'utf-8' + file_system_encoding = getfilesystemencoding() if IS_PYTHON_3_ONWARDS: @@ -132,12 +132,14 @@ def tounicode(s): return s.decode(file_system_encoding) return s + def tobytes(s): if hasattr(s, 'encode'): if not isinstance(s, bytes_type): return s.encode(file_system_encoding) return s + def toasciimxl(s): # output for xml without a declared encoding @@ -179,7 +181,6 @@ if __name__ == '__main__': if sys.platform == "cygwin" and not executable.endswith(tounicode('.exe')): executable += tounicode('.exe') - try: major = str(sys.version_info[0]) minor = str(sys.version_info[1]) @@ -204,7 +205,6 @@ if __name__ == '__main__': prefix = tounicode(native_path(sys.prefix)) # print_ 'prefix is', prefix - result = [] path_used = sys.path @@ -242,7 +242,6 @@ if __name__ == '__main__': for builtinMod in sys.builtin_module_names: contents.append(tounicode('%s') % tounicode(builtinMod)) - contents.append(tounicode('')) unic = tounicode('\n').join(contents) inasciixml = toasciimxl(unic) diff --git a/src/debugpy/_vendored/pydevd/pycompletionserver.py b/src/debugpy/_vendored/pydevd/pycompletionserver.py index d73c9020..97868bac 100644 --- a/src/debugpy/_vendored/pydevd/pycompletionserver.py +++ b/src/debugpy/_vendored/pydevd/pycompletionserver.py @@ -3,14 +3,6 @@ Entry-point module to start the code-completion server for PyDev. @author Fabio Zadrozny ''' -import sys -IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3 - -if not IS_PYTHON_3_ONWARDS: - import __builtin__ -else: - import builtins as __builtin__ # Python 3.0 - from _pydevd_bundle.pydevd_constants import IS_JYTHON if IS_JYTHON: @@ -24,7 +16,6 @@ else: SERVER_NAME = 'pycompletionserver' from _pydev_bundle import _pydev_imports_tipper - from _pydev_imps._pydev_saved_modules import socket import sys @@ -35,7 +26,6 @@ if sys.platform == "darwin": except: pass - # initial sys.path _sys_path = [] for p in sys.path: @@ -47,20 +37,11 @@ _sys_modules = {} for name, mod in sys.modules.items(): _sys_modules[name] = mod - import traceback -from _pydev_imps._pydev_saved_modules import time +from io import StringIO -try: - import StringIO -except: - import io as StringIO #Python 3.0 - -try: - from urllib import quote_plus, unquote_plus -except ImportError: - from urllib.parse import quote_plus, unquote_plus #Python 3.0 +from urllib.parse import quote_plus, unquote_plus INFO1 = 1 INFO2 = 2 @@ -69,6 +50,7 @@ ERROR = 8 DEBUG = INFO1 | ERROR + def dbg(s, prior): if prior & DEBUG != 0: sys.stdout.write('%s\n' % (s,)) @@ -76,8 +58,9 @@ def dbg(s, prior): # print_ >> f, s # f.close() + from _pydev_bundle import pydev_localhost -HOST = pydev_localhost.get_localhost() # Symbolic name meaning the local host +HOST = pydev_localhost.get_localhost() # Symbolic name meaning the local host MSG_KILL_SERVER = '@@KILL_SERVER_END@@' MSG_COMPLETIONS = '@@COMPLETIONS' @@ -94,10 +77,9 @@ MSG_SEARCH = '@@SEARCH' BUFFER_SIZE = 1024 - - currDirModule = None + def complete_from_dir(directory): ''' This is necessary so that we get the imports from the same directory where the file @@ -173,9 +155,11 @@ class Processor: return '%s(%s)%s' % (MSG_COMPLETIONS, ''.join(compMsg), MSG_END) + class Exit(Exception): pass + class CompletionServer: def __init__(self, port): @@ -185,7 +169,6 @@ class CompletionServer: self.exit_process_on_kill = True self.processor = Processor() - def connect_to_server(self): from _pydev_imps._pydev_saved_modules import socket @@ -225,17 
+208,8 @@ class CompletionServer: return totalsent = totalsent + sent - def send(self, msg): - if not hasattr(self.socket, 'sendall'): - #Older versions (jython 2.1) - self.emulated_sendall(msg) - else: - if IS_PYTHON_3_ONWARDS: - self.socket.sendall(bytearray(msg, 'utf-8')) - else: - self.socket.sendall(msg) - + self.socket.sendall(bytearray(msg, 'utf-8')) def run(self): # Echo server program @@ -249,7 +223,6 @@ class CompletionServer: dbg(SERVER_NAME + ' Connected to java server', INFO1) - while not self.ended: data = '' @@ -257,10 +230,7 @@ class CompletionServer: received = self.socket.recv(BUFFER_SIZE) if len(received) == 0: raise Exit() # ok, connection ended - if IS_PYTHON_3_ONWARDS: - data = data + received.decode('utf-8') - else: - data = data + received + data = data + received.decode('utf-8') try: try: @@ -355,13 +325,13 @@ class CompletionServer: try: self.send(msg) except socket.error: - pass # Ok, may be closed already + pass # Ok, may be closed already - raise e # raise original error. + raise e # raise original error. except: dbg(SERVER_NAME + ' exception occurred', ERROR) - s = StringIO.StringIO() + s = StringIO() traceback.print_exc(file=s) err = s.getvalue() @@ -370,8 +340,7 @@ class CompletionServer: try: self.send(msg) except socket.error: - pass # Ok, may be closed already - + pass # Ok, may be closed already finally: log.clear_log() @@ -380,13 +349,12 @@ class CompletionServer: self.ended = True raise Exit() # connection broken - except Exit: if self.exit_process_on_kill: sys.exit(0) # No need to log SystemExit error except: - s = StringIO.StringIO() + s = StringIO() exc_info = sys.exc_info() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], limit=None, file=s) @@ -395,7 +363,6 @@ class CompletionServer: raise - if __name__ == '__main__': port = int(sys.argv[1]) # this is from where we want to receive messages. 
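The hunks above collapse the Python 2/3 branches of CompletionServer.send and CompletionServer.run into a single bytes-on-the-wire pattern: encode once with utf-8 when sending, decode once when receiving. For reference, a minimal standalone sketch of that pattern; the helper names send_msg/recv_msg and the '@@END@@' terminator are illustrative, not the server's real MSG_* constants:

    import socket

    BUFFER_SIZE = 1024

    def send_msg(sock: socket.socket, msg: str) -> None:
        # Python 3 sockets carry bytes: encode exactly once, at the wire boundary.
        sock.sendall(bytearray(msg, 'utf-8'))

    def recv_msg(sock: socket.socket, terminator: str = '@@END@@') -> str:
        # Mirror image: accumulate decoded text until the protocol terminator arrives.
        # Like the patched server, this assumes a recv() chunk never splits a
        # multi-byte utf-8 sequence (the protocol messages are mostly ASCII).
        data = ''
        while not data.endswith(terminator):
            received = sock.recv(BUFFER_SIZE)
            if len(received) == 0:
                break  # connection closed by peer
            data = data + received.decode('utf-8')
        return data
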
diff --git a/src/debugpy/_vendored/pydevd/pydevconsole.py b/src/debugpy/_vendored/pydevd/pydevconsole.py index 9b234aa8..5302e90b 100644 --- a/src/debugpy/_vendored/pydevd/pydevconsole.py +++ b/src/debugpy/_vendored/pydevd/pydevconsole.py @@ -17,7 +17,7 @@ import os import sys from _pydev_imps._pydev_saved_modules import threading -from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE, dict_keys +from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE import traceback from _pydev_bundle import pydev_log @@ -26,16 +26,12 @@ from _pydevd_bundle import pydevd_save_locals from _pydev_bundle.pydev_imports import Exec, _queue -if sys.version_info[0] >= 3: - import builtins as __builtin__ -else: - import __builtin__ +import builtins as __builtin__ from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn # @UnusedImport from _pydev_bundle.pydev_console_utils import CodeFragment IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3 -IS_PY24 = sys.version_info[0] == 2 and sys.version_info[1] == 4 class Command: @@ -138,7 +134,7 @@ def set_debug_hook(debug_hook): def activate_mpl_if_already_imported(interpreter): if interpreter.mpl_modules_for_patching: - for module in dict_keys(interpreter.mpl_modules_for_patching): + for module in list(interpreter.mpl_modules_for_patching): if module in sys.modules: activate_function = interpreter.mpl_modules_for_patching.pop(module) activate_function() @@ -176,7 +172,7 @@ def init_mpl_in_console(interpreter): activate_mpl_if_already_imported(interpreter) from _pydev_bundle.pydev_import_hook import import_hook_manager - for mod in dict_keys(interpreter.mpl_modules_for_patching): + for mod in list(interpreter.mpl_modules_for_patching): import_hook_manager.add_module_name(mod, interpreter.mpl_modules_for_patching.pop(mod)) @@ -371,10 +367,7 @@ def start_console_server(host, port, interpreter): from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer # @Reimport try: - if IS_PY24: - server = XMLRPCServer((host, port), logRequests=False) - else: - server = XMLRPCServer((host, port), logRequests=False, allow_none=True) + server = XMLRPCServer((host, port), logRequests=False, allow_none=True) except: sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port)) diff --git a/src/debugpy/_vendored/pydevd/pydevd.py b/src/debugpy/_vendored/pydevd/pydevd.py index 4b9069b2..45d5bf7c 100644 --- a/src/debugpy/_vendored/pydevd/pydevd.py +++ b/src/debugpy/_vendored/pydevd/pydevd.py @@ -51,9 +51,9 @@ from _pydevd_bundle.pydevd_comm_constants import (CMD_THREAD_SUSPEND, CMD_STEP_I CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_SMART_STEP_INTO, CMD_RUN_TO_LINE, CMD_SET_NEXT_STATEMENT, CMD_STEP_RETURN, CMD_ADD_EXCEPTION_BREAK, CMD_STEP_RETURN_MY_CODE, CMD_STEP_OVER_MY_CODE, constant_to_str, CMD_STEP_INTO_COROUTINE) -from _pydevd_bundle.pydevd_constants import (IS_JYTH_LESS25, get_thread_id, get_current_thread_id, - dict_keys, dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, - clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE, SHOW_DEBUG_INFO_ENV, IS_PY34_OR_GREATER, IS_PY2, NULL, +from _pydevd_bundle.pydevd_constants import (get_thread_id, get_current_thread_id, + DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, + clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE, SHOW_DEBUG_INFO_ENV, NULL, NO_FTRACE, IS_IRONPYTHON, JSON_PROTOCOL, IS_CPYTHON, HTTP_JSON_PROTOCOL, 
USE_CUSTOM_SYS_CURRENT_FRAMES_MAP, call_only_once, ForkSafeLock, IGNORE_BASENAMES_STARTING_WITH, EXCEPTION_TYPE_UNHANDLED, SUPPORT_GEVENT) from _pydevd_bundle.pydevd_defaults import PydevdCustomization # Note: import alias used on pydev_monkey. @@ -111,7 +111,7 @@ if SUPPORT_GEVENT: if USE_CUSTOM_SYS_CURRENT_FRAMES_MAP: from _pydevd_bundle.pydevd_constants import constructed_tid_to_last_frame -__version_info__ = (2, 7, 0) +__version_info__ = (2, 8, 0) __version_info_str__ = [] for v in __version_info__: __version_info_str__.append(str(v)) @@ -161,10 +161,7 @@ def install_breakpointhook(pydevd_breakpointhook=None): # Install the breakpoint hook at import time. install_breakpointhook() -SUPPORT_PLUGINS = not IS_JYTH_LESS25 -PluginManager = None -if SUPPORT_PLUGINS: - from _pydevd_bundle.pydevd_plugin_utils import PluginManager +from _pydevd_bundle.pydevd_plugin_utils import PluginManager threadingEnumerate = threading.enumerate threadingCurrentThread = threading.current_thread @@ -885,11 +882,6 @@ class PyDB(object): return eval(condition, new_frame.f_globals, new_frame.f_locals) except Exception as e: - if IS_PY2: - # Must be bytes on py2. - if isinstance(condition, unicode): # noqa - condition = condition.encode('utf-8') - if not isinstance(e, self.skip_print_breakpoint_exception): sys.stderr.write('Error while evaluating expression: %s\n' % (condition,)) @@ -1165,7 +1157,7 @@ class PyDB(object): return self._threads_suspended_single_notification def get_plugin_lazy_init(self): - if self.plugin is None and SUPPORT_PLUGINS: + if self.plugin is None: self.plugin = PluginManager(self) return self.plugin @@ -1533,7 +1525,7 @@ class PyDB(object): # import hook and patches for matplotlib support in debug console from _pydev_bundle.pydev_import_hook import import_hook_manager if is_current_thread_main_thread(): - for module in dict_keys(self.mpl_modules_for_patching): + for module in list(self.mpl_modules_for_patching): import_hook_manager.add_module_name(module, self.mpl_modules_for_patching.pop(module)) def init_gui_support(self): @@ -1574,7 +1566,7 @@ class PyDB(object): if len(self.mpl_modules_for_patching) > 0: if is_current_thread_main_thread(): # Note that we call only in the main thread. 
- for module in dict_keys(self.mpl_modules_for_patching): + for module in list(self.mpl_modules_for_patching): if module in sys.modules: activate_function = self.mpl_modules_for_patching.pop(module, None) if activate_function is not None: @@ -1775,7 +1767,7 @@ class PyDB(object): def consolidate_breakpoints(self, canonical_normalized_filename, id_to_breakpoint, file_to_line_to_breakpoints): break_dict = {} - for _breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint): + for _breakpoint_id, pybreakpoint in id_to_breakpoint.items(): break_dict[pybreakpoint.line] = pybreakpoint file_to_line_to_breakpoints[canonical_normalized_filename] = break_dict @@ -2016,7 +2008,7 @@ class PyDB(object): with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable from_this_thread = [] - for frame_custom_thread_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames): + for frame_custom_thread_id, custom_frame in CustomFramesContainer.custom_frames.items(): if custom_frame.thread_id == thread.ident: frames_tracker.track(thread_id, pydevd_frame_utils.create_frames_list_from_frame(custom_frame.frame), frame_custom_thread_id=frame_custom_thread_id) # print('Frame created as thread: %s' % (frame_custom_thread_id,)) @@ -2230,7 +2222,7 @@ class PyDB(object): try: def get_pydb_daemon_threads_to_wait(): - pydb_daemon_threads = set(dict_keys(self.created_pydb_daemon_threads)) + pydb_daemon_threads = set(self.created_pydb_daemon_threads) pydb_daemon_threads.discard(self.check_alive_thread) pydb_daemon_threads.discard(threading.current_thread()) return pydb_daemon_threads @@ -2298,7 +2290,7 @@ class PyDB(object): else: pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads timed out waiting for writer to be empty.") - pydb_daemon_threads = set(dict_keys(self.created_pydb_daemon_threads)) + pydb_daemon_threads = set(self.created_pydb_daemon_threads) for t in pydb_daemon_threads: if hasattr(t, 'do_kill_pydev_thread'): pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads killing thread: %s", t) @@ -2909,7 +2901,7 @@ def _locked_settrace( py_db.set_trace_for_frame_and_parents(get_frame().f_back) with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable - for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames): + for _frameId, custom_frame in CustomFramesContainer.custom_frames.items(): py_db.set_trace_for_frame_and_parents(custom_frame.frame) else: @@ -3374,8 +3366,7 @@ def main(): if setup['save-threading']: debugger.thread_analyser = ThreadingLogger() if setup['save-asyncio']: - if IS_PY34_OR_GREATER: - debugger.asyncio_analyser = AsyncioLogger() + debugger.asyncio_analyser = AsyncioLogger() apply_debugger_options(setup) diff --git a/src/debugpy/_vendored/pydevd/pydevd_concurrency_analyser/pydevd_concurrency_logger.py b/src/debugpy/_vendored/pydevd/pydevd_concurrency_analyser/pydevd_concurrency_logger.py index 6f7139f1..8b500e26 100644 --- a/src/debugpy/_vendored/pydevd/pydevd_concurrency_analyser/pydevd_concurrency_logger.py +++ b/src/debugpy/_vendored/pydevd/pydevd_concurrency_analyser/pydevd_concurrency_logger.py @@ -4,7 +4,7 @@ from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding from _pydev_imps._pydev_saved_modules import threading from _pydevd_bundle import pydevd_xml from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder -from _pydevd_bundle.pydevd_constants import get_thread_id, IS_PY3K +from _pydevd_bundle.pydevd_constants import get_thread_id from _pydevd_bundle.pydevd_net_command import NetCommand 
from pydevd_concurrency_analyser.pydevd_thread_wrappers import ObjectWrapper, wrap_attr import pydevd_file_utils @@ -13,10 +13,7 @@ import sys file_system_encoding = getfilesystemencoding() -try: - from urllib import quote -except: - from urllib.parse import quote # @UnresolvedImport +from urllib.parse import quote threadingCurrentThread = threading.current_thread @@ -69,7 +66,7 @@ def get_text_list_for_frame(frame): cmdTextList.append(variables) cmdTextList.append("") curFrame = curFrame.f_back - except : + except: pydev_log.exception() return cmdTextList @@ -183,7 +180,7 @@ class ThreadingLogger: my_back = frame.f_back.f_back my_thread_id = get_thread_id(my_self_obj) send_massage = True - if IS_PY3K and hasattr(my_self_obj, "_pydev_join_called"): + if hasattr(my_self_obj, "_pydev_join_called"): send_massage = False # we can't detect stop after join in Python 2 yet if send_massage: diff --git a/src/debugpy/_vendored/pydevd/pydevd_file_utils.py b/src/debugpy/_vendored/pydevd/pydevd_file_utils.py index 9d8d647e..39450bb4 100644 --- a/src/debugpy/_vendored/pydevd/pydevd_file_utils.py +++ b/src/debugpy/_vendored/pydevd/pydevd_file_utils.py @@ -42,7 +42,7 @@ r''' ''' from _pydev_bundle import pydev_log -from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON, \ +from _pydevd_bundle.pydevd_constants import DebugInfoHolder, IS_WINDOWS, IS_JYTHON, \ DISABLE_FILE_VALIDATION from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding from _pydevd_bundle.pydevd_comm_constants import file_system_encoding, filesystem_encoding_is_utf8 @@ -140,27 +140,19 @@ if sys.platform == 'win32': def _convert_to_long_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) - if IS_PY2 and isinstance(filename, str): - filename = filename.decode(getfilesystemencoding()) rv = GetLongPathName(filename, buf, MAX_PATH) if rv != 0 and rv <= MAX_PATH: filename = buf.value - if IS_PY2: - filename = filename.encode(getfilesystemencoding()) return filename def _convert_to_short_pathname(filename): buf = ctypes.create_unicode_buffer(MAX_PATH) - if IS_PY2 and isinstance(filename, str): - filename = filename.decode(getfilesystemencoding()) rv = GetShortPathName(filename, buf, MAX_PATH) if rv != 0 and rv <= MAX_PATH: filename = buf.value - if IS_PY2: - filename = filename.encode(getfilesystemencoding()) return filename # Note that we have a cache for previous list dirs... the only case where this may be an @@ -202,9 +194,6 @@ if sys.platform == 'win32': # consistently (there are settings to disable it on Windows). # So, using approach which resolves by listing the dir. 
- if IS_PY2 and isinstance(filename, unicode): # noqa - filename = filename.encode(getfilesystemencoding()) - if '~' in filename: filename = convert_to_long_pathname(filename) @@ -257,8 +246,6 @@ elif IS_JYTHON and IS_WINDOWS: from java.io import File # noqa f = File(filename) ret = f.getCanonicalPath() - if IS_PY2 and not isinstance(ret, str): - return ret.encode(getfilesystemencoding()) return ret if IS_JYTHON: @@ -574,18 +561,8 @@ except: def _path_to_expected_str(filename): - if IS_PY2: - if not filesystem_encoding_is_utf8 and hasattr(filename, "decode"): - # filename_in_utf8 is a byte string encoded using the file system encoding - # convert it to utf8 - filename = filename.decode(file_system_encoding) - - if not isinstance(filename, bytes): - filename = filename.encode('utf-8') - - else: # py3 - if isinstance(filename, bytes): - filename = filename.decode(file_system_encoding) + if isinstance(filename, bytes): + filename = filename.decode(file_system_encoding) return filename @@ -691,12 +668,6 @@ def setup_client_server_paths(paths): # Apply normcase to the existing paths to follow the os preferences. for i, (path0, path1) in enumerate(paths): - if IS_PY2: - if isinstance(path0, unicode): # noqa - path0 = path0.encode(sys.getfilesystemencoding()) - if isinstance(path1, unicode): # noqa - path1 = path1.encode(sys.getfilesystemencoding()) - force_only_slash = path0.endswith(('/', '\\')) and path1.endswith(('/', '\\')) if not force_only_slash: @@ -902,10 +873,7 @@ def get_abs_path_real_path_and_base_from_frame(frame, NORM_PATHS_AND_BASE_CONTAI def get_fullname(mod_name): - if IS_PY3K: - import pkgutil - else: - from _pydev_imps import _pydev_pkgutil_old as pkgutil + import pkgutil try: loader = pkgutil.get_loader(mod_name) except: diff --git a/src/debugpy/_vendored/pydevd/pydevd_plugins/django_debug.py b/src/debugpy/_vendored/pydevd/pydevd_plugins/django_debug.py index 20e10de5..ff7f1eb9 100644 --- a/src/debugpy/_vendored/pydevd/pydevd_plugins/django_debug.py +++ b/src/debugpy/_vendored/pydevd/pydevd_plugins/django_debug.py @@ -2,7 +2,7 @@ import inspect from _pydev_bundle import pydev_log from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_ADD_EXCEPTION_BREAK -from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, dict_iter_items, DJANGO_SUSPEND, IS_PY2, \ +from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, DJANGO_SUSPEND, \ DebugInfoHolder from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, FCode, just_raised, ignore_exception_trace from pydevd_file_utils import canonical_normalized_path, absolute_path @@ -241,10 +241,7 @@ def _find_django_render_frame(frame): def _read_file(filename): # type: (str) -> str - if IS_PY2: - f = open(filename, 'r') - else: - f = open(filename, 'r', encoding='utf-8', errors='replace') + f = open(filename, 'r', encoding='utf-8', errors='replace') s = f.read() f.close() return s @@ -292,9 +289,6 @@ def _get_source_django_18_or_lower(frame): def _convert_to_str(s): - if IS_PY2: - if isinstance(s, unicode): - s = s.encode('utf-8') return s @@ -461,7 +455,7 @@ def has_exception_breaks(plugin): def has_line_breaks(plugin): - for _canonical_normalized_filename, breakpoints in dict_iter_items(plugin.main_debugger.django_breakpoints): + for _canonical_normalized_filename, breakpoints in plugin.main_debugger.django_breakpoints.items(): if len(breakpoints) > 0: return True return False diff --git a/src/debugpy/_vendored/pydevd/pydevd_plugins/jinja2_debug.py b/src/debugpy/_vendored/pydevd/pydevd_plugins/jinja2_debug.py index 
a660f492..a5e4a000 100644 --- a/src/debugpy/_vendored/pydevd/pydevd_plugins/jinja2_debug.py +++ b/src/debugpy/_vendored/pydevd/pydevd_plugins/jinja2_debug.py @@ -1,5 +1,4 @@ -from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, dict_iter_items, dict_keys, JINJA2_SUSPEND, \ - IS_PY2 +from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, JINJA2_SUSPEND from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_ADD_EXCEPTION_BREAK from pydevd_file_utils import canonical_normalized_path from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, FCode @@ -294,9 +293,6 @@ def _get_jinja2_template_line(frame): def _convert_to_str(s): - if IS_PY2: - if isinstance(s, unicode): - s = s.encode('utf-8', 'replace') return s @@ -318,7 +314,7 @@ def has_exception_breaks(plugin): def has_line_breaks(plugin): - for _canonical_normalized_filename, breakpoints in dict_iter_items(plugin.main_debugger.jinja2_breakpoints): + for _canonical_normalized_filename, breakpoints in plugin.main_debugger.jinja2_breakpoints.items(): if len(breakpoints) > 0: return True return False @@ -336,20 +332,14 @@ def can_skip(plugin, pydb, frame): if pydb.jinja2_exception_break: name = frame.f_code.co_name - if IS_PY2: - if name == 'fail': - module_name = frame.f_globals.get('__name__', '') - if module_name == 'jinja2.parser': - return False - else: - # errors in compile time - if name in ('template', 'top-level template code', '') or name.startswith('block '): - f_back = frame.f_back - module_name = '' - if f_back is not None: - module_name = f_back.f_globals.get('__name__', '') - if module_name.startswith('jinja2.'): - return False + # errors in compile time + if name in ('template', 'top-level template code', '') or name.startswith('block '): + f_back = frame.f_back + module_name = '' + if f_back is not None: + module_name = f_back.f_globals.get('__name__', '') + if module_name.startswith('jinja2.'): + return False return True @@ -484,7 +474,7 @@ def exception_break(plugin, pydb, pydb_frame, frame, args, arg): thread = args[3] exception, value, trace = arg if pydb.jinja2_exception_break and exception is not None: - exception_type = dict_keys(pydb.jinja2_exception_break)[0] + exception_type = list(pydb.jinja2_exception_break.keys())[0] if exception.__name__ in ('UndefinedError', 'TemplateNotFound', 'TemplatesNotFound'): # errors in rendering render_frame = _find_jinja2_render_frame(frame) @@ -499,35 +489,18 @@ def exception_break(plugin, pydb, pydb_frame, frame, args, arg): elif exception.__name__ in ('TemplateSyntaxError', 'TemplateAssertionError'): name = frame.f_code.co_name - if IS_PY2: - if name == 'fail': - module_name = frame.f_globals.get('__name__', '') - if module_name == 'jinja2.parser': - filename = value.filename - lineno = value.lineno + # errors in compile time + if name in ('template', 'top-level template code', '') or name.startswith('block '): - syntax_error_frame = Jinja2TemplateSyntaxErrorFrame( - frame, exception.__name__, filename, lineno, {'name': value.name, 'exception': value}) + f_back = frame.f_back + if f_back is not None: + module_name = f_back.f_globals.get('__name__', '') - pydb_frame.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK) - add_exception_to_frame(syntax_error_frame, (exception, value, trace)) - thread.additional_info.suspend_type = JINJA2_SUSPEND - thread.additional_info.pydev_message = str(exception_type) - return True, syntax_error_frame - - else: - # errors in compile time - if name in ('template', 'top-level template code', '') or name.startswith('block '): - - 
f_back = frame.f_back - if f_back is not None: - module_name = f_back.f_globals.get('__name__', '') - - if module_name.startswith('jinja2.'): - # Jinja2 translates exception info and creates fake frame on his own - pydb_frame.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK) - add_exception_to_frame(frame, (exception, value, trace)) - thread.additional_info.suspend_type = JINJA2_SUSPEND - thread.additional_info.pydev_message = str(exception_type) - return True, frame + if module_name.startswith('jinja2.'): + # Jinja2 translates exception info and creates a fake frame on its own + pydb_frame.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK) + add_exception_to_frame(frame, (exception, value, trace)) + thread.additional_info.suspend_type = JINJA2_SUSPEND + thread.additional_info.pydev_message = str(exception_type) + return True, frame return None diff --git a/src/debugpy/_vendored/pydevd/pydevd_plugins/pydevd_line_validation.py b/src/debugpy/_vendored/pydevd/pydevd_plugins/pydevd_line_validation.py index 418e518f..2b64a525 100644 --- a/src/debugpy/_vendored/pydevd/pydevd_plugins/pydevd_line_validation.py +++ b/src/debugpy/_vendored/pydevd/pydevd_plugins/pydevd_line_validation.py @@ -1,5 +1,4 @@ from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint -from _pydevd_bundle.pydevd_constants import dict_items from _pydevd_bundle.pydevd_api import PyDevdAPI import bisect from _pydev_bundle import pydev_log @@ -71,7 +70,7 @@ class ValidationInfo(object): self._verify_breakpoints_with_lines_collected(py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines) def _verify_breakpoints_with_lines_collected(self, py_db, canonical_normalized_filename, template_breakpoints_for_file, valid_lines_frozenset, sorted_lines): - for line, template_bp in dict_items(template_breakpoints_for_file): # Note: iterate in a copy (we may mutate it). + for line, template_bp in list(template_breakpoints_for_file.items()): # Note: iterate in a copy (we may mutate it).
if template_bp.verified_cache_key != valid_lines_frozenset: template_bp.verified_cache_key = valid_lines_frozenset valid = line in valid_lines_frozenset diff --git a/src/debugpy/_vendored/pydevd/pydevd_tracing.py b/src/debugpy/_vendored/pydevd/pydevd_tracing.py index 7bf66c15..5ed7b0ce 100644 --- a/src/debugpy/_vendored/pydevd/pydevd_tracing.py +++ b/src/debugpy/_vendored/pydevd/pydevd_tracing.py @@ -1,24 +1,17 @@ from _pydevd_bundle.pydevd_constants import get_frame, IS_CPYTHON, IS_64BIT_PROCESS, IS_WINDOWS, \ - IS_LINUX, IS_MAC, IS_PY2, DebugInfoHolder, LOAD_NATIVE_LIB_FLAG, \ - ENV_FALSE_LOWER_VALUES, GlobalDebuggerHolder, ForkSafeLock + IS_LINUX, IS_MAC, DebugInfoHolder, LOAD_NATIVE_LIB_FLAG, \ + ENV_FALSE_LOWER_VALUES, ForkSafeLock from _pydev_imps._pydev_saved_modules import thread, threading from _pydev_bundle import pydev_log, pydev_monkey -from os.path import os +import os.path import platform try: import ctypes except ImportError: ctypes = None -try: - import cStringIO as StringIO # may not always be available @UnusedImport -except: - try: - import StringIO # @Reimport - except: - import io as StringIO - -import sys # @Reimport +from io import StringIO +import sys import traceback _original_settrace = sys.settrace @@ -35,7 +28,7 @@ class TracingFunctionHolder: def get_exception_traceback_str(): exc_info = sys.exc_info() - s = StringIO.StringIO() + s = StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], file=s) return s.getvalue() @@ -47,7 +40,7 @@ def _get_stack_str(frame): '\nto see how to restore the debug tracing back correctly.\n' if TracingFunctionHolder._traceback_limit: - s = StringIO.StringIO() + s = StringIO() s.write('Call Location:\n') traceback.print_stack(f=frame, limit=TracingFunctionHolder._traceback_limit, file=s) msg = msg + s.getvalue() @@ -314,10 +307,7 @@ def set_trace_to_threads(tracing_func, thread_idents=None, create_dummy_thread=T def _set_ident(self): # Note: Hack to set the thread ident that we want. - if IS_PY2: - self._Thread__ident = thread_ident - else: - self._ident = thread_ident + self._ident = thread_ident t = _DummyThread() # Reset to the base class (don't expose our own version of the class). diff --git a/src/debugpy/_vendored/pydevd/runfiles.py b/src/debugpy/_vendored/pydevd/runfiles.py index 8d17ac63..b3ecdf92 100644 --- a/src/debugpy/_vendored/pydevd/runfiles.py +++ b/src/debugpy/_vendored/pydevd/runfiles.py @@ -4,7 +4,6 @@ Entry point module (keep at root): Used to run with tests with unittest/pytest/nose. ''' - import os try: @@ -12,6 +11,7 @@ try: except: xrange = range + def main(): import sys @@ -36,9 +36,12 @@ def main(): try: # Convert to the case stored in the filesystem import win32api + def get_with_filesystem_case(f): return win32api.GetLongPathName(win32api.GetShortPathName(f)) + except: + def get_with_filesystem_case(f): return f @@ -106,7 +109,6 @@ def main(): # --tests = Constants.ATTR_UNITTEST_TESTS # --config-file = Constants.ATTR_UNITTEST_CONFIGURATION_FILE - # The only thing actually handled here are the tests that we want to run, which we'll # handle and pass as what the test framework expects. @@ -151,7 +153,6 @@ def main(): argv = other_test_framework_params + files_or_dirs - if test_framework == NOSE_FRAMEWORK: # Nose usage: http://somethingaboutorange.com/mrl/projects/nose/0.11.2/usage.html # show_stdout_option = ['-s'] @@ -219,7 +220,7 @@ def main(): # Workaround bug in py.test: if we pass the full path it ends up importing conftest # more than once (so, always work with relative paths). 
if os.path.isfile(arg) or os.path.isdir(arg): - + # Args must be passed with the proper case in the filesystem (otherwise # python itself may not recognize it). arg = get_with_filesystem_case(arg) @@ -251,8 +252,7 @@ def main(): # Set what should be skipped in the plugin through an environment variable s = base64.b64encode(zlib.compress(pickle.dumps(py_test_accept_filter))) - if pydevd_constants.IS_PY3K: - s = s.decode('ascii') # Must be str in py3. + s = s.decode('ascii') # Must be str in py3. os.environ['PYDEV_PYTEST_SKIP'] = s # Identifies the main pid (i.e.: if it's not the main pid it has to connect back to the @@ -286,6 +286,7 @@ if __name__ == '__main__': import traceback class DumpThreads(threading.Thread): + def run(self): time.sleep(10) @@ -316,7 +317,6 @@ if __name__ == '__main__': stack_trace.append('\n=============================== END Thread Dump ===============================') sys.stderr.write('\n'.join(stack_trace)) - dump_current_frames_thread = DumpThreads() dump_current_frames_thread.daemon = True # Daemon so that this thread doesn't halt it! dump_current_frames_thread.start() diff --git a/src/debugpy/_vendored/pydevd/stubs/_get_tips.py b/src/debugpy/_vendored/pydevd/stubs/_get_tips.py deleted file mode 100644 index 79dffaaa..00000000 --- a/src/debugpy/_vendored/pydevd/stubs/_get_tips.py +++ /dev/null @@ -1,283 +0,0 @@ -import os.path -import inspect -import sys - -# completion types. -TYPE_IMPORT = '0' -TYPE_CLASS = '1' -TYPE_FUNCTION = '2' -TYPE_ATTR = '3' -TYPE_BUILTIN = '4' -TYPE_PARAM = '5' - -def _imp(name, log=None): - try: - return __import__(name) - except: - if '.' in name: - sub = name[0:name.rfind('.')] - - if log is not None: - log.AddContent('Unable to import', name, 'trying with', sub) - # log.AddContent('PYTHONPATH:') - # log.AddContent('\n'.join(sorted(sys.path))) - log.AddException() - - return _imp(sub, log) - else: - s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path) - if log is not None: - log.AddContent(s) - log.AddException() - - raise ImportError(s) - - -IS_IPY = False -if sys.platform == 'cli': - IS_IPY = True - _old_imp = _imp - def _imp(name, log=None): - # We must add a reference in clr for .Net - import clr # @UnresolvedImport - initial_name = name - while '.' in name: - try: - clr.AddReference(name) - break # If it worked, that's OK. - except: - name = name[0:name.rfind('.')] - else: - try: - clr.AddReference(name) - except: - pass # That's OK (not dot net module). - - return _old_imp(initial_name, log) - - - -def GetFile(mod): - f = None - try: - f = inspect.getsourcefile(mod) or inspect.getfile(mod) - except: - try: - f = getattr(mod, '__file__', None) - except: - pass - else: - if f and f.lower(f[-4:]) in ['.pyc', '.pyo']: - filename = f[:-4] + '.py' - if os.path.exists(filename): - f = filename - - return f - -def Find(name, log=None): - f = None - - mod = _imp(name, log) - parent = mod - foundAs = '' - - if inspect.ismodule(mod): - f = GetFile(mod) - - components = name.split('.') - - old_comp = None - for comp in components[1:]: - try: - # this happens in the following case: - # we have mx.DateTime.mxDateTime.mxDateTime.pyd - # but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd - mod = getattr(mod, comp) - except AttributeError: - if old_comp != comp: - raise - - if inspect.ismodule(mod): - f = GetFile(mod) - else: - if len(foundAs) > 0: - foundAs = foundAs + '.' 
- foundAs = foundAs + comp - - old_comp = comp - - return f, mod, parent, foundAs - - -def GenerateTip(data, log=None): - data = data.replace('\n', '') - if data.endswith('.'): - data = data.rstrip('.') - - f, mod, parent, foundAs = Find(data, log) - # print_ >> open('temp.txt', 'w'), f - tips = GenerateImportsTipForModule(mod) - return f, tips - - -def CheckChar(c): - if c == '-' or c == '.': - return '_' - return c - -def GenerateImportsTipForModule(obj_to_complete, dirComps=None, getattr=getattr, filter=lambda name:True): - ''' - @param obj_to_complete: the object from where we should get the completions - @param dirComps: if passed, we should not 'dir' the object and should just iterate those passed as a parameter - @param getattr: the way to get a given object from the obj_to_complete (used for the completer) - @param filter: a callable that receives the name and decides if it should be appended or not to the results - @return: list of tuples, so that each tuple represents a completion with: - name, doc, args, type (from the TYPE_* constants) - ''' - ret = [] - - if dirComps is None: - dirComps = dir(obj_to_complete) - if hasattr(obj_to_complete, '__dict__'): - dirComps.append('__dict__') - if hasattr(obj_to_complete, '__class__'): - dirComps.append('__class__') - - getCompleteInfo = True - - if len(dirComps) > 1000: - # ok, we don't want to let our users wait forever... - # no complete info for you... - - getCompleteInfo = False - - dontGetDocsOn = (float, int, str, tuple, list) - for d in dirComps: - - if d is None: - continue - - if not filter(d): - continue - - args = '' - - try: - obj = getattr(obj_to_complete, d) - except: # just ignore and get it without aditional info - ret.append((d, '', args, TYPE_BUILTIN)) - else: - - if getCompleteInfo: - retType = TYPE_BUILTIN - - # check if we have to get docs - getDoc = True - for class_ in dontGetDocsOn: - - if isinstance(obj, class_): - getDoc = False - break - - doc = '' - if getDoc: - # no need to get this info... too many constants are defined and - # makes things much slower (passing all that through sockets takes quite some time) - try: - doc = inspect.getdoc(obj) - if doc is None: - doc = '' - except: # may happen on jython when checking java classes (so, just ignore it) - doc = '' - - - if inspect.ismethod(obj) or inspect.isbuiltin(obj) or inspect.isfunction(obj) or inspect.isroutine(obj): - try: - args, vargs, kwargs, defaults = inspect.getargspec(obj) - except: - args, vargs, kwargs, defaults = (('self',), None, None, None) - if defaults is not None: - start_defaults_at = len(args) - len(defaults) - - - r = '' - for i, a in enumerate(args): - - if len(r) > 0: - r = r + ', ' - - r = r + str(a) - - if defaults is not None and i >= start_defaults_at: - default = defaults[i - start_defaults_at] - r += '=' +str(default) - - - others = '' - if vargs: - others += '*' + vargs - - if kwargs: - if others: - others+= ', ' - others += '**' + kwargs - - if others: - r+= ', ' - - - args = '(%s%s)' % (r, others) - retType = TYPE_FUNCTION - - elif inspect.isclass(obj): - retType = TYPE_CLASS - - elif inspect.ismodule(obj): - retType = TYPE_IMPORT - - else: - retType = TYPE_ATTR - - - # add token and doc to return - assure only strings. 
- ret.append((d, doc, args, retType)) - - - else: # getCompleteInfo == False - if inspect.ismethod(obj) or inspect.isbuiltin(obj) or inspect.isfunction(obj) or inspect.isroutine(obj): - retType = TYPE_FUNCTION - - elif inspect.isclass(obj): - retType = TYPE_CLASS - - elif inspect.ismodule(obj): - retType = TYPE_IMPORT - - else: - retType = TYPE_ATTR - # ok, no complete info, let's try to do this as fast and clean as possible - # so, no docs for this kind of information, only the signatures - ret.append((d, '', str(args), retType)) - - return ret - - - - -if __name__ == '__main__': - # To use when we have some object: i.e.: obj_to_complete=MyModel.objects - temp = ''' -def %(method_name)s%(args)s: - """ -%(doc)s - """ -''' - - for entry in GenerateImportsTipForModule(obj_to_complete): - import textwrap - doc = textwrap.dedent(entry[1]) - lines = [] - for line in doc.splitlines(): - lines.append(' ' + line) - doc = '\n'.join(lines) - print temp % dict(method_name=entry[0], args=entry[2] or '(self)', doc=doc) diff --git a/src/debugpy/_vendored/pydevd/stubs/pycompletion.py b/src/debugpy/_vendored/pydevd/stubs/pycompletion.py deleted file mode 100644 index f9fb7733..00000000 --- a/src/debugpy/_vendored/pydevd/stubs/pycompletion.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/python -''' -@author Radim Kubacki -''' -from _pydev_bundle import _pydev_imports_tipper -import traceback -import StringIO -import sys -import urllib -import pycompletionserver - - -#======================================================================================================================= -# GetImports -#======================================================================================================================= -def GetImports(module_name): - try: - processor = pycompletionserver.Processor() - data = urllib.unquote_plus(module_name) - def_file, completions = _pydev_imports_tipper.GenerateTip(data) - return processor.formatCompletionMessage(def_file, completions) - except: - s = StringIO.StringIO() - exc_info = sys.exc_info() - - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], limit=None, file=s) - err = s.getvalue() - pycompletionserver.dbg('Received error: ' + str(err), pycompletionserver.ERROR) - raise - - -#======================================================================================================================= -# main -#======================================================================================================================= -if __name__ == '__main__': - mod_name = sys.argv[1] - - print(GetImports(mod_name)) - diff --git a/src/debugpy/_vendored/pydevd/tests_python/debug_constants.py b/src/debugpy/_vendored/pydevd/tests_python/debug_constants.py index 6d289234..68876f16 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/debug_constants.py +++ b/src/debugpy/_vendored/pydevd/tests_python/debug_constants.py @@ -5,17 +5,9 @@ import platform TEST_CYTHON = os.getenv('PYDEVD_USE_CYTHON', None) == 'YES' PYDEVD_TEST_VM = os.getenv('PYDEVD_TEST_VM', None) -IS_PY3K = sys.version_info[0] >= 3 IS_PY36_OR_GREATER = sys.version_info[0:2] >= (3, 6) IS_CPYTHON = platform.python_implementation() == 'CPython' -IS_PY2 = False -if sys.version_info[0] == 2: - IS_PY2 = True - -IS_PY26 = sys.version_info[:2] == (2, 6) -IS_PY27 = sys.version_info[:2] == (2, 7) -IS_PY34 = sys.version_info[:2] == (3, 4) IS_PY36 = False if sys.version_info[0] == 3 and sys.version_info[1] == 6: IS_PY36 = True diff --git a/src/debugpy/_vendored/pydevd/tests_python/debugger_fixtures.py 
b/src/debugpy/_vendored/pydevd/tests_python/debugger_fixtures.py index d5a3809d..8a337cf8 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/debugger_fixtures.py +++ b/src/debugpy/_vendored/pydevd/tests_python/debugger_fixtures.py @@ -8,7 +8,6 @@ from tests_python import debugger_unittest from tests_python.debugger_unittest import (get_free_port, overrides, IS_CPYTHON, IS_JYTHON, IS_IRONPYTHON, CMD_ADD_DJANGO_EXCEPTION_BREAK, CMD_REMOVE_DJANGO_EXCEPTION_BREAK, CMD_ADD_EXCEPTION_BREAK, wait_for_condition, IS_PYPY) -from tests_python.debug_constants import IS_PY2 from _pydevd_bundle.pydevd_comm_constants import file_system_encoding import sys @@ -246,11 +245,7 @@ def case_setup(tmpdir, debugger_runner_simple): class CaseSetup(object): check_non_ascii = False - if IS_PY2 and IS_WINDOWS: - # Py2 has some issues converting the non latin1 chars to bytes in windows. - NON_ASCII_CHARS = u'áéíóú' - else: - NON_ASCII_CHARS = u'áéíóú汉字' + NON_ASCII_CHARS = u'áéíóú汉字' @contextmanager def test_file( @@ -276,9 +271,6 @@ def case_setup(tmpdir, debugger_runner_simple): shutil.copyfile(filename, new_filename) filename = new_filename - if IS_PY2: - filename = filename.encode(file_system_encoding) - WriterThread.TEST_FILE = filename for key, value in kwargs.items(): assert hasattr(WriterThread, key) diff --git a/src/debugpy/_vendored/pydevd/tests_python/debugger_unittest.py b/src/debugpy/_vendored/pydevd/tests_python/debugger_unittest.py index 5219a878..1fb2a645 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/debugger_unittest.py +++ b/src/debugpy/_vendored/pydevd/tests_python/debugger_unittest.py @@ -291,8 +291,7 @@ class ReaderThread(threading.Thread): if SHOW_WRITES_AND_READS: show_line = line - if IS_PY3K: - show_line = line.decode('utf-8') + show_line = line.decode('utf-8') print('%s Received %s' % (self.name, show_line,)) @@ -311,8 +310,7 @@ class ReaderThread(threading.Thread): return # Finished communication. msg = json_contents - if IS_PY3K: - msg = msg.decode('utf-8') + msg = msg.decode('utf-8') print('Test Reader Thread Received %s' % (msg,)) self._queue.put(msg) @@ -329,9 +327,8 @@ class ReaderThread(threading.Thread): line = line[:-1] msg = line - if IS_PY3K: - msg = msg.decode('utf-8') - print('Test Reader Thread Received %s' % (msg,)) + msg = msg.decode('utf-8') + print('Test Reader Thread Received %s' % (msg,)) self._queue.put(msg) except: @@ -369,8 +366,7 @@ def read_process(stream, buffer, debug_stream, stream_name, finish): if not line: break - if IS_PY3K: - line = line.decode('utf-8', errors='replace') + line = line.decode('utf-8', errors='replace') if SHOW_STDOUT: debug_stream.write('%s: %s' % (stream_name, line,)) @@ -742,14 +738,6 @@ class AbstractWriterThread(threading.Thread): if line.strip().startswith('at '): return True - if IS_PY26: - # Sometimes in the ci there's an unhandled exception which doesn't have a stack trace - # (apparently this happens when a daemon thread dies during process shutdown). - # This was only reproducible on the ci on Python 2.6, so, ignoring that output on Python 2.6 only. 
- for expected in ( - 'Unhandled exception in thread started by <_pydev_bundle.pydev_monkey._NewThreadStartupWithTrace'): - if expected in line: - return True return False def additional_output_checks(self, stdout, stderr): @@ -847,8 +835,7 @@ class AbstractWriterThread(threading.Thread): print('%s.sock not available when sending: %s' % (self, msg)) return - if IS_PY3K: - msg = msg.encode('utf-8') + msg = msg.encode('utf-8') self.sock.send(msg) @@ -1440,8 +1427,7 @@ class AbstractWriterThread(threading.Thread): try: stream = urlopen(full_url) contents = stream.read() - if IS_PY3K: - contents = contents.decode('utf-8') + contents = contents.decode('utf-8') self.contents = contents break except IOError: diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_code_obj_to_source_code.py b/src/debugpy/_vendored/pydevd/tests_python/test_code_obj_to_source_code.py index 8e866572..7516cbf4 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_code_obj_to_source_code.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_code_obj_to_source_code.py @@ -1,13 +1,9 @@ from _pydevd_bundle.pydevd_code_to_source import code_obj_to_source import pytest -from tests_python.debug_constants import IS_PY26 # i.e.: Skip these tests (this is a work in progress / proof of concept / not ready to be used). pytestmark = pytest.mark.skip -if IS_PY26: # pytestmark not available in older versions of pytest. - pytest.skip('Work in progress') - def check(obtained, expected, strip_return_none=True): keepends = False diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_collect_bytecode_info.py b/src/debugpy/_vendored/pydevd/tests_python/test_collect_bytecode_info.py index ac45c2cb..0b3d4b71 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_collect_bytecode_info.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_collect_bytecode_info.py @@ -7,9 +7,7 @@ import traceback from _pydevd_bundle.pydevd_collect_bytecode_info import collect_try_except_info, \ collect_return_info, code_to_bytecode_representation from tests_python.debugger_unittest import IS_CPYTHON, IS_PYPY -from tests_python.debug_constants import IS_PY2, IS_PY3K -from _pydevd_bundle.pydevd_constants import IS_PY38_OR_GREATER, IS_JYTHON, IS_PY36_OR_GREATER, \ - IS_PY35_OR_GREATER +from _pydevd_bundle.pydevd_constants import IS_PY38_OR_GREATER, IS_JYTHON def _method_call_with_error(): @@ -386,7 +384,6 @@ def test_collect_try_except_info_multiple_except(exc_verifier): exc_verifier.check(try_except_with, '[{try:1 except 3 end block 8}]') -@pytest.mark.skipif(not IS_PY35_OR_GREATER, reason='Python 3.5 onwards required for async for/async def') def test_collect_try_except_info_async_for(): # Not valid on Python 2. @@ -460,9 +457,7 @@ def test_collect_return_info(): assert str(collect_return_info(method5.__code__, use_func_first_line=True)) == \ '[{return: 1}]' if IS_PY38_OR_GREATER else '[{return: 3}]' - if not IS_PY2: - # return in generator is not valid for python 2. 
- code = ''' + code = ''' def method(): if a: yield 1 @@ -472,10 +467,10 @@ def method(): pass ''' - scope = {} - exec(code, scope) - assert str(collect_return_info(scope['method'].__code__, use_func_first_line=True)) == \ - '[{return: 4}, {return: 6}]' + scope = {} + exec(code, scope) + assert str(collect_return_info(scope['method'].__code__, use_func_first_line=True)) == \ + '[{return: 4}, {return: 6}]' @pytest.mark.skipif(IS_JYTHON, reason='Jython does not have bytecode support.') @@ -624,10 +619,10 @@ def test_simple_code_to_bytecode_cls_method(): def test_simple_code_to_bytecode_repr_unicode(): def method4(): - return 'áéíóú' + return 'áéíóú' new_repr = code_to_bytecode_representation(method4.__code__, use_func_first_line=True) - assert repr('áéíóú') in new_repr + assert repr('áéíóú') in new_repr def _create_entry(instruction): diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_console.py b/src/debugpy/_vendored/pydevd/tests_python/test_console.py index 428f7aee..c1d0f84b 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_console.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_console.py @@ -12,12 +12,8 @@ from _pydev_bundle.pydev_imports import _queue as queue from _pydev_bundle.pydev_imports import SimpleXMLRPCServer import time import socket -from tests_python.debug_constants import IS_PY2 -if IS_PY2: - builtin_qualifier = "__builtin__" -else: - builtin_qualifier = "builtins" +builtin_qualifier = "builtins" @pytest.fixture diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_convert_utilities.py b/src/debugpy/_vendored/pydevd/tests_python/test_convert_utilities.py index 3fcba4a1..af27e74f 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_convert_utilities.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_convert_utilities.py @@ -1,6 +1,6 @@ # coding: utf-8 import os.path -from _pydevd_bundle.pydevd_constants import IS_WINDOWS, IS_PY2 +from _pydevd_bundle.pydevd_constants import IS_WINDOWS from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import io from _pydev_bundle.pydev_log import log_context @@ -54,15 +54,6 @@ def test_convert_utilities(tmpdir): assert pydevd_file_utils._listdir_cache[os.path.dirname(normalized).lower()] == ['Test_Convert_Utilities'] assert pydevd_file_utils._listdir_cache[(os.path.dirname(normalized).lower(), 'Test_Convert_Utilities'.lower())] == real_case - if IS_PY2: - # Test with unicode in python 2 too. - real_case = pydevd_file_utils.get_path_with_real_case(normalized.decode( - getfilesystemencoding())) - assert isinstance(real_case, str) # bytes on py2, unicode on py3 - # Note test_dir itself cannot be compared with because pytest may - # have passed the case normalized. - assert real_case.endswith("Test_Convert_Utilities") - # Check that it works with a shortened path. 
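    # A minimal sketch of the behavior checked next (illustrative value only; the
    # actual 8.3 short name depends on the volume, and convert_to_short_pathname
    # is only meaningful on Windows):
    #
    #     pydevd_file_utils.convert_to_short_pathname(r'C:\Program Files')
    #     # -> r'C:\PROGRA~1', hence the assertion that a '~' is present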
shortened = pydevd_file_utils.convert_to_short_pathname(normalized) assert '~' in shortened diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_debugger.py b/src/debugpy/_vendored/pydevd/tests_python/test_debugger.py index 53036eeb..d39f3a97 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_debugger.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_debugger.py @@ -26,7 +26,6 @@ import json import pydevd_file_utils import subprocess import threading -from tests_python.debug_constants import IS_PY26 from _pydev_bundle import pydev_log try: from urllib import unquote @@ -39,15 +38,9 @@ pytest_plugins = [ str('tests_python.debugger_fixtures'), ] -try: - xrange -except: - xrange = range +xrange = range -if IS_PY2: - builtin_qualifier = "__builtin__" -else: - builtin_qualifier = "builtins" +builtin_qualifier = "builtins" @pytest.mark.skipif(not IS_CPYTHON, reason='Test needs gc.get_referrers/reference counting to really check anything.') @@ -1360,10 +1353,7 @@ def test_case_handled_and_unhandled_exception_generator(case_setup, target_file, if 'generator' in target_file: expected_frame_names = ['', 'f', ''] else: - if IS_PY27 or IS_PY26: - expected_frame_names = ['f', ''] - else: - expected_frame_names = ['', 'f', ''] + expected_frame_names = ['', 'f', ''] writer.write_get_current_exception(hit.thread_id) msg = writer.wait_for_message(accept_message=lambda msg:'exc_type="' in msg and 'exc_desc="' in msg, unquote_msg=False) @@ -1374,15 +1364,10 @@ def test_case_handled_and_unhandled_exception_generator(case_setup, target_file, writer.write_run_thread(hit.thread_id) if not unhandled: - if (IS_PY26 or IS_PY27) and 'listcomp' in target_file: - expected_lines = [ - writer.get_line_index_with_content('# call exc'), - ] - else: - expected_lines = [ - writer.get_line_index_with_content('# exc line'), - writer.get_line_index_with_content('# call exc'), - ] + expected_lines = [ + writer.get_line_index_with_content('# exc line'), + writer.get_line_index_with_content('# call exc'), + ] for expected_line in expected_lines: hit = writer.wait_for_breakpoint_hit(REASON_CAUGHT_EXCEPTION) @@ -1965,7 +1950,7 @@ def test_case_settrace(case_setup): writer.finished_ok = True -@pytest.mark.skipif(True or IS_PY26 or IS_JYTHON, reason='This is *very* flaky. Scapy only supports 2.7 onwards, not available for jython.') +@pytest.mark.skipif(True, reason='This is *very* flaky.') def test_case_scapy(case_setup): with case_setup.test_file('_debugger_case_scapy.py') as writer: writer.FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True @@ -2433,14 +2418,11 @@ def test_py_37_breakpoint(case_setup, filename): def _get_generator_cases(): - if IS_PY2: - return ('_debugger_case_generator_py2.py',) - else: - # On py3 we should check both versions. - return ( - '_debugger_case_generator_py2.py', - '_debugger_case_generator_py3.py', - ) + # On py3 we should check both versions. + return ( + '_debugger_case_generator_py2.py', + '_debugger_case_generator_py3.py', + ) @pytest.mark.parametrize("filename", _get_generator_cases()) @@ -2653,8 +2635,7 @@ def test_multiprocessing_with_stopped_breakpoints(case_setup_multiprocessing, co secondary_process_thread_communication.start() ok = listening_event.wait(timeout=10) - if not IS_PY26: - assert ok + assert ok writer.write_make_initial_run() hit2 = writer.wait_for_breakpoint_hit() # Breaks in thread. 
writer.write_step_over(hit2.thread_id) @@ -3083,7 +3064,6 @@ def test_trace_dispatch_correct(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Failing on Python 2.6 on travis (needs investigation).') def test_case_single_notification_on_step(case_setup): from tests_python.debugger_unittest import REASON_STEP_INTO with case_setup.test_file('_debugger_case_import_main.py') as writer: @@ -3509,7 +3489,6 @@ def test_step_return_my_code(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY2, reason='Python 3 onwards required.') def test_smart_step_into_case1(case_setup): with case_setup.test_file('_debugger_case_smart_step_into.py') as writer: line = writer.get_line_index_with_content('break here') @@ -3532,7 +3511,6 @@ def test_smart_step_into_case1(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY2, reason='Python 3 onwards required.') def test_smart_step_into_case2(case_setup): with case_setup.test_file('_debugger_case_smart_step_into2.py') as writer: line = writer.get_line_index_with_content('break here') @@ -3561,7 +3539,6 @@ def test_smart_step_into_case2(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY2, reason='Python 3 onwards required.') def test_smart_step_into_case3(case_setup): with case_setup.test_file('_debugger_case_smart_step_into3.py') as writer: line = writer.get_line_index_with_content('break here') @@ -3915,12 +3892,10 @@ def test_matplotlib_activation(case_setup): _GENERATOR_FILES = [ '_debugger_case_generator3.py', + '_debugger_case_generator.py', + '_debugger_case_generator2.py', ] -if not IS_PY2: - _GENERATOR_FILES.append('_debugger_case_generator.py') - _GENERATOR_FILES.append('_debugger_case_generator2.py') - @pytest.mark.parametrize('target_filename', _GENERATOR_FILES) @pytest.mark.skipif(IS_JYTHON, reason='We do not detect generator returns on Jython.') @@ -4326,8 +4301,7 @@ def test_frame_eval_mode_corner_case_many(case_setup, break_name): writer.finished_ok = True -if IS_PY3K: - check_shadowed = [ +check_shadowed = [ ( u''' if __name__ == '__main__': @@ -4347,30 +4321,7 @@ if __name__ == '__main__': 'queue.py', u'raise AssertionError("error on import")' ) - ] - -else: - check_shadowed = [ - ( - u''' -if __name__ == '__main__': - import Queue - print(Queue) -''', - 'Queue.py', - u'shadowed = True\n' - ), - - ( - u''' -if __name__ == '__main__': - import Queue - print(Queue) -''', - 'Queue.py', - u'raise AssertionError("error on import")' - ) - ] +] @pytest.mark.parametrize('module_name_and_content', check_shadowed) diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_debugger_json.py b/src/debugpy/_vendored/pydevd/tests_python/test_debugger_json.py index f1c9143f..3f2c6340 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_debugger_json.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_debugger_json.py @@ -21,8 +21,8 @@ from _pydevd_bundle.pydevd_constants import (int_types, IS_64BIT_PROCESS, PY_VERSION_STR, PY_IMPL_VERSION_STR, PY_IMPL_NAME, IS_PY36_OR_GREATER, IS_PYPY, GENERATED_LEN_ATTR_NAME, IS_WINDOWS, IS_LINUX, IS_MAC, IS_PY38_OR_GREATER) from tests_python import debugger_unittest -from tests_python.debug_constants import TEST_CHERRYPY, IS_PY2, TEST_DJANGO, TEST_FLASK, IS_PY26, \ - IS_PY27, IS_CPYTHON, TEST_GEVENT, TEST_CYTHON +from tests_python.debug_constants import TEST_CHERRYPY, TEST_DJANGO, TEST_FLASK, \ + IS_CPYTHON, TEST_GEVENT, TEST_CYTHON from tests_python.debugger_unittest import (IS_JYTHON, IS_APPVEYOR, overrides, get_free_port, wait_for_condition) from 
_pydevd_bundle.pydevd_utils import DAPGrouper @@ -136,11 +136,6 @@ class JsonFacade(object): json_hit = self.get_stack_as_json_hit(stopped_event.body.threadId) if file is not None: path = json_hit.stack_trace_response.body.stackFrames[0]['source']['path'] - if IS_PY2: - if isinstance(file, bytes): - file = file.decode('utf-8') - if isinstance(path, bytes): - path = path.decode('utf-8') if not path.endswith(file): # pytest may give a lowercase tempdir, so, also check with @@ -592,7 +587,6 @@ def test_case_json_logpoint_and_step(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Failing on Python 2.6') def test_case_json_hit_count_and_step(case_setup): with case_setup.test_file('_debugger_case_hit_count.py') as writer: json_facade = JsonFacade(writer) @@ -756,7 +750,6 @@ def _check_current_line(json_hit, current_line): rep)) -@pytest.mark.skipif(IS_PY26, reason='Not ok on Python 2.6') @pytest.mark.parametrize('stop', [False, True]) def test_case_user_unhandled_exception(case_setup, stop): @@ -851,7 +844,6 @@ def test_case_user_unhandled_exception_coroutine(case_setup, stop): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Not ok on Python 2.6') def test_case_user_unhandled_exception_dont_stop(case_setup): with case_setup.test_file( @@ -872,7 +864,6 @@ def test_case_user_unhandled_exception_dont_stop(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Not ok on Python 2.6') def test_case_user_unhandled_exception_stop_on_yield(case_setup, pyfile): @pyfile @@ -1097,10 +1088,7 @@ def test_case_unhandled_exception_generator(case_setup, target_file): if 'generator' in target_file: expected_frame_names = ['', 'f', ''] else: - if IS_PY27 or IS_PY26: - expected_frame_names = ['f', ''] - else: - expected_frame_names = ['', 'f', ''] + expected_frame_names = ['', 'f', ''] frame_names = [f['name'] for f in frames] assert frame_names == expected_frame_names @@ -1584,7 +1572,6 @@ def test_modules(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Python 2.6 does not have an ordered dict') def test_dict_ordered(case_setup): with case_setup.test_file('_debugger_case_odict.py') as writer: json_facade = JsonFacade(writer) @@ -1634,23 +1621,13 @@ def test_stack_and_variables_dict(case_setup): assert isinstance(dict_variable_reference, int_types) # : :type variables_response: VariablesResponse - if IS_PY2: - print(repr(variables_response.body.variables[-1])) - expected_unicode = { - u'name': u'\u16a0', - u'value': u"u'\\u16a1'", - u'type': u'unicode', - u'presentationHint': {u'attributes': [u'rawString']}, - u'evaluateName': u'\u16a0', - } - else: - expected_unicode = { - 'name': u'\u16A0', - 'value': "'\u16a1'", - 'type': 'str', - 'presentationHint': {'attributes': ['rawString']}, - 'evaluateName': u'\u16A0', - } + expected_unicode = { + 'name': u'\u16A0', + 'value': "'\u16a1'", + 'type': 'str', + 'presentationHint': {'attributes': ['rawString']}, + 'evaluateName': u'\u16A0', + } assert variables_response.body.variables == [ {'name': 'variable_for_test_1', 'value': '10', 'type': 'int', 'evaluateName': 'variable_for_test_1'}, {'name': 'variable_for_test_2', 'value': '20', 'type': 'int', 'evaluateName': 'variable_for_test_2'}, @@ -1687,13 +1664,9 @@ def test_variables_with_same_name(case_setup): assert isinstance(dict_variable_reference, int_types) # : :type variables_response: VariablesResponse - if not IS_PY2: - assert variables_response.body.variables == [ - {'name': 'td', 'value': "{foo: 'bar', gad: 'zooks', foo: 
'bur'}", 'type': 'dict', 'evaluateName': 'td'} - ] - else: - # The value may change the representation on Python 2 as dictionaries don't keep the insertion order. - assert len(variables_response.body.variables) == 1 + assert variables_response.body.variables == [ + {'name': 'td', 'value': "{foo: 'bar', gad: 'zooks', foo: 'bur'}", 'type': 'dict', 'evaluateName': 'td'} + ] dict_variables_response = json_facade.get_variables_response(dict_variable_reference) # Note that we don't have the evaluateName because it's not possible to create a key @@ -1780,8 +1753,7 @@ def test_hasattr_failure(case_setup): evaluate_response = json_facade.evaluate('obj', json_hit.frame_id, context='hover') evaluate_response_body = evaluate_response.body.to_dict() - if not IS_PY2: - assert evaluate_response_body['result'] == 'An exception was raised: RuntimeError()' + assert evaluate_response_body['result'] == 'An exception was raised: RuntimeError()' json_facade.evaluate('not_there', json_hit.frame_id, context='hover', success=False) json_facade.evaluate('not_there', json_hit.frame_id, context='watch', success=False) @@ -2057,7 +2029,6 @@ def test_evaluate_block_clipboard(case_setup, pyfile): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='__dir__ not customizable on Python 2.6') def test_exception_on_dir(case_setup): with case_setup.test_file('_debugger_case_dir_exception.py') as writer: json_facade = JsonFacade(writer) @@ -2086,9 +2057,6 @@ def test_exception_on_dir(case_setup): ]) @pytest.mark.parametrize('asyncio', [True, False]) def test_return_value_regular(case_setup, scenario, asyncio): - if IS_PY2 and asyncio: - raise pytest.skip('asyncio not available for python 2.') - with case_setup.test_file('_debugger_case_return_value.py' if not asyncio else '_debugger_case_return_value_asyncio.py') as writer: json_facade = JsonFacade(writer) @@ -2147,10 +2115,7 @@ def test_stack_and_variables_set_and_list(case_setup): variables_response = json_facade.get_variables_response(json_hit.frame_id) variables_references = json_facade.pop_variables_reference(variables_response.body.variables) - if IS_PY2: - expected_set = "set(['a'])" - else: - expected_set = "{'a'}" + expected_set = "{'a'}" assert variables_response.body.variables == [ {'type': 'list', 'evaluateName': 'variable_for_test_1', 'name': 'variable_for_test_1', 'value': "['a', 'b']"}, {'type': 'set', 'evaluateName': 'variable_for_test_2', 'name': 'variable_for_test_2', 'value': expected_set} @@ -2225,45 +2190,17 @@ def test_evaluate_unicode(case_setup): evaluate_response_body = evaluate_response.body.to_dict() - if IS_PY2: - # The error can be referenced. 
- variables_reference = json_facade.pop_variables_reference([evaluate_response_body]) - - assert evaluate_response_body == { - 'result': u"SyntaxError('invalid syntax', ('', 1, 1, '\\xe1\\x9a\\xa0'))", - 'type': u'SyntaxError', - 'presentationHint': {}, - } - - assert len(variables_reference) == 1 - reference = variables_reference[0] - assert reference > 0 - variables_response = json_facade.get_variables_response(reference) - child_variables = variables_response.to_dict()['body']['variables'] - assert len(child_variables) == 2 - for c in child_variables: - if c[u'type'] == u'SyntaxError': - assert c.pop('variablesReference') > 0 - assert c == { - u'type': u'SyntaxError', - u'evaluateName': u'\u16a0.result', - u'name': u'result', - u'value': u"SyntaxError('invalid syntax', ('', 1, 1, '\\xe1\\x9a\\xa0'))" - } - - else: - assert evaluate_response_body == { - 'result': "'\u16a1'", - 'type': 'str', - 'variablesReference': 0, - 'presentationHint': {'attributes': ['rawString']}, - } + assert evaluate_response_body == { + 'result': "'\u16a1'", + 'type': 'str', + 'variablesReference': 0, + 'presentationHint': {'attributes': ['rawString']}, + } json_facade.write_continue() writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Not ok on Python 2.6.') def test_evaluate_exec_unicode(case_setup): def get_environ(writer): @@ -2402,7 +2339,7 @@ def test_evaluate_variable_references(case_setup): assert evaluate_response_body == { 'type': 'set', - 'result': "set(['a'])" if IS_PY2 else "{'a'}", + 'result': "{'a'}", 'presentationHint': {}, } assert len(variables_reference) == 1 @@ -3804,7 +3741,6 @@ cherrypy.quickstart(HelloWorld()) writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Flaky on Python 2.6.') def test_wait_for_attach(case_setup_remote_attach_to): host_port = get_socket_name(close=True) @@ -4039,9 +3975,6 @@ def test_path_translation_and_source_reference(case_setup): translated_dir_not_ascii = u'áéíóú汉字' - if IS_PY2: - translated_dir_not_ascii = translated_dir_not_ascii.encode(file_system_encoding) - def get_file_in_client(writer): # Instead of using: test_python/_debugger_case_path_translation.py # we'll set the breakpoints at foo/_debugger_case_path_translation.py @@ -4064,8 +3997,6 @@ def test_path_translation_and_source_reference(case_setup): bp_line = writer.get_line_index_with_content('break here') assert writer.TEST_FILE.endswith('_debugger_case_path_translation.py') local_root = os.path.dirname(get_file_in_client(writer)) - if IS_PY2: - local_root = local_root.decode(file_system_encoding).encode('utf-8') json_facade.write_launch(pathMappings=[{ 'localRoot': local_root, 'remoteRoot': os.path.dirname(writer.TEST_FILE), @@ -4092,11 +4023,6 @@ def test_path_translation_and_source_reference(case_setup): path = stack_frame['source']['path'] file_in_client_unicode = file_in_client - if IS_PY2: - if isinstance(path, bytes): - path = path.decode('utf-8') - if isinstance(file_in_client_unicode, bytes): - file_in_client_unicode = file_in_client_unicode.decode(file_system_encoding) assert path == file_in_client_unicode source_reference = stack_frame['source']['sourceReference'] @@ -4569,11 +4495,6 @@ def test_redirect_output(case_setup): output_event = json_facade.wait_for_json_message(OutputEvent) output = output_event.body.output category = output_event.body.category - if IS_PY2: - if isinstance(output, unicode): # noqa -- unicode not available in py3 - output = output.encode('utf-8') - if isinstance(category, unicode): # noqa -- unicode not available in py3 - category = 
category.encode('utf-8') msg = (output, category) except Exception: for msg in msgs: @@ -4761,7 +4682,6 @@ def test_subprocess_pydevd_customization(case_setup_remote, command_line_args): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Only Python 2.7 onwards.') def test_subprocess_then_fork(case_setup_multiprocessing): import threading from tests_python.debugger_unittest import AbstractWriterThread @@ -5445,7 +5365,6 @@ def test_variable_presentation(case_setup, var_presentation, check_func): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Only Python 2.7 onwards.') def test_debugger_case_deadlock_thread_eval(case_setup): def get_environ(self): @@ -5469,7 +5388,6 @@ def test_debugger_case_deadlock_thread_eval(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Only Python 2.7 onwards.') def test_debugger_case_breakpoint_on_unblock_thread_eval(case_setup): from _pydevd_bundle._debug_adapter.pydevd_schema import EvaluateResponse @@ -5509,7 +5427,6 @@ def test_debugger_case_breakpoint_on_unblock_thread_eval(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Only Python 2.7 onwards.') def test_debugger_case_unblock_manually(case_setup): from _pydevd_bundle._debug_adapter.pydevd_schema import EvaluateResponse @@ -5545,7 +5462,6 @@ def test_debugger_case_unblock_manually(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Only Python 2.7 onwards.') def test_debugger_case_deadlock_notify_evaluate_timeout(case_setup, pyfile): @pyfile @@ -5582,7 +5498,6 @@ def test_debugger_case_deadlock_notify_evaluate_timeout(case_setup, pyfile): writer.finished_ok = True -@pytest.mark.skipif(IS_PY26, reason='Only Python 2.7 onwards.') def test_debugger_case_deadlock_interrupt_thread(case_setup, pyfile): @pyfile @@ -5792,7 +5707,6 @@ def do_something(): writer.finished_ok = True -@pytest.mark.skipif(IS_PY2, reason='Python 3 onwards required.') def test_step_into_target_basic(case_setup): with case_setup.test_file('_debugger_case_smart_step_into.py') as writer: json_facade = JsonFacade(writer) @@ -5817,7 +5731,6 @@ def test_step_into_target_basic(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY2, reason='Python 3 onwards required.') def test_step_into_target_multiple(case_setup): with case_setup.test_file('_debugger_case_smart_step_into2.py') as writer: json_facade = JsonFacade(writer) @@ -5842,7 +5755,6 @@ def test_step_into_target_multiple(case_setup): writer.finished_ok = True -@pytest.mark.skipif(IS_PY2, reason='Python 3 onwards required.') def test_step_into_target_genexpr(case_setup): with case_setup.test_file('_debugger_case_smart_step_into3.py') as writer: json_facade = JsonFacade(writer) diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_dump_threads.py b/src/debugpy/_vendored/pydevd/tests_python/test_dump_threads.py index c077433e..b619f619 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_dump_threads.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_dump_threads.py @@ -1,9 +1,6 @@ def test_dump_threads(): import pydevd - try: - from StringIO import StringIO - except: - from io import StringIO + from io import StringIO stream = StringIO() pydevd.dump_threads(stream=stream) contents = stream.getvalue() diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_extract_token.py b/src/debugpy/_vendored/pydevd/tests_python/test_extract_token.py index 9f5509ad..94ca7272 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_extract_token.py +++ 
b/src/debugpy/_vendored/pydevd/tests_python/test_extract_token.py @@ -2,18 +2,13 @@ from __future__ import unicode_literals from _pydev_bundle._pydev_completer import (isidentifier, extract_token_and_qualifier, TokenAndQualifier) -from _pydevd_bundle.pydevd_constants import IS_PY2 def test_isidentifier(): assert isidentifier('abc') assert not isidentifier('<') assert not isidentifier('') - if IS_PY2: - # Py3 accepts unicode identifiers - assert not isidentifier('áéíóú') - else: - assert isidentifier('áéíóú') + assert isidentifier('áéíóú') def test_extract_token_and_qualifier(): diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_fixtures.py b/src/debugpy/_vendored/pydevd/tests_python/test_fixtures.py index ea1972a8..525ce1ca 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_fixtures.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_fixtures.py @@ -1,7 +1,6 @@ import json from tests_python.debugger_unittest import ReaderThread, IS_JYTHON -from tests_python.debug_constants import IS_PY3K import pytest import socket from _pydev_bundle import pydev_localhost @@ -29,7 +28,7 @@ class _DummySocket(object): return self._sock_for_reader_thread.recv(*args, **kwargs) def put(self, msg): - if IS_PY3K and not isinstance(msg, bytes): + if not isinstance(msg, bytes): msg = msg.encode('utf-8') if self._sock_for_fixture_test is None: diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_pydev_monkey.py b/src/debugpy/_vendored/pydevd/tests_python/test_pydev_monkey.py index 5db3a001..2681fe2f 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_pydev_monkey.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_pydev_monkey.py @@ -7,7 +7,6 @@ import pytest from _pydev_bundle.pydev_monkey import pydev_src_dir from _pydevd_bundle.pydevd_constants import sorted_dict_repr from pydevd import SetupHolder -from tests_python.debug_constants import IS_PY2 try: from _pydev_bundle import pydev_monkey @@ -477,13 +476,8 @@ def test_monkey_patch_c_program_arg(use_bytes): encode = lambda s:s if use_bytes: - if not IS_PY2: - check = [c.encode('utf-8') for c in check] - encode = lambda s:s.encode('utf-8') - else: - if IS_PY2: - check = [c.decode('utf-8') for c in check] - encode = lambda s:s.decode('utf-8') + check = [c.encode('utf-8') for c in check] + encode = lambda s:s.encode('utf-8') assert pydev_monkey.patch_args(check) == [ encode('C:\\bin\\python.exe'), diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_resolvers.py b/src/debugpy/_vendored/pydevd/tests_python/test_resolvers.py index 033012e2..8e8cc496 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_resolvers.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_resolvers.py @@ -1,4 +1,3 @@ -from tests_python.debug_constants import IS_PY2 from _pydevd_bundle.pydevd_constants import IS_PY36_OR_GREATER, GENERATED_LEN_ATTR_NAME @@ -19,9 +18,6 @@ def test_dict_resolver(): assert contents_debug_adapter_protocol == [ ('(1, 2)', 2, '[(1, 2)]'), ("'22'", 22, "['22']")] - elif IS_PY2: - assert contents_debug_adapter_protocol == [ - ('(1, 2)', 2, '[(1, 2)]'), (u"u'22'", 22, u"[u'22']")] else: assert contents_debug_adapter_protocol == [ ("'22'", 22, "['22']"), ('(1, 2)', 2, '[(1, 2)]')] @@ -117,22 +113,11 @@ def test_object_resolver__dict__non_strings(): obj = MyObject() dictionary = clear_contents_dictionary(default_resolver.get_dictionary(obj)) - if IS_PY2: - assert 'attribute name must be string' in dictionary.pop('(1, 2)') - assert dictionary == {} - else: - assert dictionary == {'(1, 2)': (3, 4)} + assert 
dictionary == {'(1, 2)': (3, 4)} contents_debug_adapter_protocol = clear_contents_debug_adapter_protocol( default_resolver.get_contents_debug_adapter_protocol(obj)) - if IS_PY2: - assert len(contents_debug_adapter_protocol) == 1 - entry = contents_debug_adapter_protocol[0] - assert entry[0] == '(1, 2)' - assert 'attribute name must be string' in entry[1] - assert entry[2] == '.(1, 2)' - else: - assert contents_debug_adapter_protocol == [('(1, 2)', (3, 4), '.__dict__[(1, 2)]')] + assert contents_debug_adapter_protocol == [('(1, 2)', (3, 4), '.__dict__[(1, 2)]')] def test_django_forms_resolver(): @@ -148,19 +133,11 @@ def test_django_forms_resolver(): obj = MyObject() dictionary = clear_contents_dictionary(django_form_resolver.get_dictionary(obj)) - if IS_PY2: - assert 'attribute name must be string' in dictionary.pop('(1, 2)') - assert dictionary == {'errors': None} - else: - assert dictionary == {'(1, 2)': (3, 4), 'errors': None} + assert dictionary == {'(1, 2)': (3, 4), 'errors': None} obj._errors = 'bar' dictionary = clear_contents_dictionary(django_form_resolver.get_dictionary(obj)) - if IS_PY2: - assert 'attribute name must be string' in dictionary.pop('(1, 2)') - assert dictionary == {'errors': 'bar', '_errors': 'bar'} - else: - assert dictionary == {'(1, 2)': (3, 4), 'errors': 'bar', '_errors': 'bar'} + assert dictionary == {'(1, 2)': (3, 4), 'errors': 'bar', '_errors': 'bar'} def clear_contents_debug_adapter_protocol(contents_debug_adapter_protocol): diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_run.py b/src/debugpy/_vendored/pydevd/tests_python/test_run.py index 054e7a93..197223cb 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_run.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_run.py @@ -29,12 +29,8 @@ def test_run(testdir_or_pytester): import sys import os - if debugger_unittest.IS_PY3K: - foo_dir = debugger_unittest._get_debugger_test_file(os.path.join('resources', 'launch', 'foo')) - foo_module = 'tests_python.resources.launch.foo' - else: - foo_dir = debugger_unittest._get_debugger_test_file(os.path.join('resources', 'launch_py2', 'foo')) - foo_module = 'tests_python.resources.launch_py2.foo' + foo_dir = debugger_unittest._get_debugger_test_file(os.path.join('resources', 'launch', 'foo')) + foo_module = 'tests_python.resources.launch.foo' pydevd_dir = os.path.dirname(os.path.dirname(__file__)) assert os.path.exists(os.path.join(pydevd_dir, 'pydevd.py')) diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py b/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py index 95d4370e..37f54d16 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_safe_repr.py @@ -5,7 +5,7 @@ import re import pytest from _pydevd_bundle.pydevd_safe_repr import SafeRepr import json -from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY2, IS_PY36_OR_GREATER +from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY36_OR_GREATER try: import numpy as np @@ -604,98 +604,6 @@ class TestNumpy(SafeReprTestBase): self.assert_unchanged(value, repr(value)) -@pytest.mark.parametrize('params', [ - # In python 2, unicode slicing may or may not work well depending on whether it's a ucs-2 or - # ucs-4 build (so, we have to strip the high-surrogate if it's ucs-2 and the number of chars - # will be different). 
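    # For instance (sketch): u'\U0001f604' is one code point on a ucs-4 build but
    # a surrogate pair (u'\ud83d\ude04') on a ucs-2 build, so slicing the same
    # window may cut a pair in half -- which is why the expected values below come
    # in pairs and some contain a lone surrogate such as '\ud83d'.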
-
-    {'maxother_outer': 20, 'input': u"😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄F😄FF😄F", 'output': (u"😄😄😄😄😄😄...FF😄F", u"😄😄😄😄😄😄😄😄😄😄😄😄😄...F😄FF😄F"), 'output_str': ("u'\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\ud83d...\\ude04FF\\U0001f604F'", "u'\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604...F\\U0001f604FF\\U0001f604F'")},
-
-    {'maxother_outer': 20, 'input': u"😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄FFFFFFFF", 'output': (u"😄😄😄😄😄😄...FFFFFF", u"😄😄😄😄😄😄😄😄😄😄😄😄😄...FFFFFF"), 'output_str': ("u'\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\ud83d...FFFFFF'", "u'\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604...FFFFFF'")},
-    {'maxother_outer': 20, 'input': u"🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐FFFFFFFF", 'output': (u"🌐🌐🌐🌐🌐🌐...FFFFFF", u"🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐🌐...FFFFFF"), 'output_str': ("u'\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\ud83c...FFFFFF'", "u'\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310...FFFFFF'")},
-    {'maxother_outer': 10, 'input': u"😄😄😄😄😄😄😄😄😄FFFFFFFF", 'output': (u"😄😄😄...FFF", u"😄😄😄😄😄😄...FFF"), 'output_str': ("u'\\U0001f604\\U0001f604\\U0001f604...FFF'", "u'\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604\\U0001f604...FFF'")},
-    {'maxother_outer': 10, 'input': u"🌐🌐🌐🌐🌐🌐🌐🌐🌐FFFFFFFF", 'output': (u"🌐🌐🌐...FFF", u"🌐🌐🌐🌐🌐🌐...FFF"), 'output_str': ("u'\\U0001f310\\U0001f310\\U0001f310...FFF'", "u'\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310\\U0001f310...FFF'")},
-
-    # Regular unicode
-    {'maxother_outer': 20, 'input': u"ωωωωωωωωωωωωωωωωωωωωωωωFFFFFFFF", 'output': u"ωωωωωωωωωωωωω...FFFFFF", 'output_str': repr(u"ωωωωωωωωωωωωω...FFFFFF")},
-    {'maxother_outer': 10, 'input': u"������������FFFFFFFF", 'output': u"������...FFF", 'output_str': repr(u"������...FFF")},
-
-    # Note: as unicode directly doesn't reach the limit and is not elided.
-    {'maxother_outer': 20, 'input': u"������������FFFFFFFF", 'output': u"������������F...FFFFFF", 'output_str': repr(u"������������FFFFFFFF")},
-
-    # Note that we actually get the repr() in this case as we can't decode it with any of the available encodings.
-    {'maxother_outer': 10, 'input': b'\xed\xbd\xbf\xff\xfe\xfa\xfd' * 10, 'output': b"'\\xed\\...fd'", 'output_str': "'\\xed\\xbd\\xbf\\xff\\xfe\\xfa...\\xfe\\xfa\\xfd'"},
-    {'maxother_outer': 20, 'input': b'\xed\xbd\xbf\xff\xfe\xfa\xfd' * 10, 'output': b"'\\xed\\xbd\\xbf...a\\xfd'", 'output_str': "'\\xed\\xbd\\xbf\\xff\\xfe\\xfa\\xfd\\xed\\xbd\\xbf\\xff\\xfe\\xfa...\\xbd\\xbf\\xff\\xfe\\xfa\\xfd'"},
-    # Check that we use repr() even if it fits the maxother_outer limit.
-    {'maxother_outer': 100, 'input': b'\xed\xbd\xbf\xff\xfe\xfa\xfd', 'output': "'\\xed\\xbd\\xbf\\xff\\xfe\\xfa\\xfd'", 'output_str': repr(b'\xed\xbd\xbf\xff\xfe\xfa\xfd')},
-
-    # Note that with latin1 encoding we can actually decode the string but when encoding back to utf-8 we have garbage
-    # (couldn't find a good approach to know what to do here as we've actually been able to decode it as
-    # latin-1 because it's a very permissive encoding).
- { - 'maxother_outer': 10, - 'sys_stdout_encoding': 'latin1', - 'input': b'\xed\xbd\xbf\xff\xfe\xfa\xfd' * 10, - 'output': b'\xc3\xad\xc2\xbd\xc2\xbf\xc3\xbf\xc3\xbe\xc3\xba...\xc3\xbe\xc3\xba\xc3\xbd', - 'output_str': "\'\\xed\\xbd\\xbf\\xff\\xfe\\xfa...\\xfe\\xfa\\xfd\'", - }, -]) -@pytest.mark.parametrize('use_str', [True, False]) -@pytest.mark.skipif(not IS_PY2, reason='Py2 specific test.') -def test_py2_bytes_slicing(params, use_str): - safe_repr = SafeRepr() - safe_repr.locale_preferred_encoding = 'ascii' - safe_repr.sys_stdout_encoding = params.get('sys_stdout_encoding', 'ascii') - - safe_repr.maxother_outer = params['maxother_outer'] - - # This is the encoding that we expect back (because json needs to be able to encode it - # later on, so, the return from SafeRepr must always be utf-8 regardless of the input). - encoding = 'utf-8' - - if not use_str: - - class MyObj(object): - - def __repr__(self): - ret = params['input'] - if isinstance(ret, unicode): - ret = ret.encode(encoding) - return ret - - safe_repr_input = MyObj() - else: - safe_repr_input = params['input'] - - computed = safe_repr(safe_repr_input) - - if use_str: - expected_output = params['output_str'] - else: - expected_output = params['output'] - - expect_unicode = False - if isinstance(expected_output, unicode): - expect_unicode = True - if isinstance(expected_output, tuple) and isinstance(expected_output[0], unicode): - expect_unicode = True - - if expect_unicode: - computed = computed.decode(encoding) - if isinstance(expected_output, tuple): - assert computed in expected_output - else: - assert computed == expected_output - else: - if isinstance(expected_output, tuple): - assert computed in expected_output - else: - assert computed == expected_output - - # Check that we can json-encode the return. - assert json.dumps(computed) - - @pytest.mark.parametrize('params', [ {'maxother_outer': 20, 'input': "😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄FFFFFFFF", 'output': '😄😄😄😄😄😄😄😄😄😄😄😄😄...FFFFFF'}, {'maxother_outer': 10, 'input': "😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄😄FFFFFFFF", 'output': '😄😄😄😄😄😄...FFF'}, @@ -703,7 +611,6 @@ def test_py2_bytes_slicing(params, use_str): # Because we can't return bytes, byte-related tests aren't needed (and str works as it should). 
]) -@pytest.mark.skipif(IS_PY2, reason='Py3 specific test') @pytest.mark.parametrize('use_str', [True, False]) def test_py3_str_slicing(params, use_str): # Note: much simpler in python because __repr__ is required to return str @@ -741,10 +648,7 @@ def test_raw_bytes(): obj = b'\xed\xbd\xbf\xff\xfe\xfa\xfd' raw_value_repr = safe_repr(obj) assert isinstance(raw_value_repr, str) # bytes on py2, str on py3 - if IS_PY2: - assert raw_value_repr == obj.decode('latin1').encode('utf-8') - else: - assert raw_value_repr == obj.decode('latin1') + assert raw_value_repr == obj.decode('latin1') def test_raw_unicode(): @@ -753,10 +657,7 @@ def test_raw_unicode(): obj = u'\xed\xbd\xbf\xff\xfe\xfa\xfd' raw_value_repr = safe_repr(obj) assert isinstance(raw_value_repr, str) # bytes on py2, str on py3 - if IS_PY2: - assert raw_value_repr == obj.encode('utf-8') - else: - assert raw_value_repr == obj + assert raw_value_repr == obj def test_no_repr(): diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_tracing_gotchas.py b/src/debugpy/_vendored/pydevd/tests_python/test_tracing_gotchas.py index b120db87..66204021 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_tracing_gotchas.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_tracing_gotchas.py @@ -1,6 +1,5 @@ import pytest import sys -from tests_python.test_debugger import IS_PY26, IS_PY34 from _pydevd_bundle.pydevd_constants import NO_FTRACE from tests_python.debugger_unittest import IS_JYTHON diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_utilities.py b/src/debugpy/_vendored/pydevd/tests_python/test_utilities.py index d8fa3a67..b9dfbe33 100644 --- a/src/debugpy/_vendored/pydevd/tests_python/test_utilities.py +++ b/src/debugpy/_vendored/pydevd/tests_python/test_utilities.py @@ -1,12 +1,11 @@ import threading from _pydevd_bundle.pydevd_utils import convert_dap_log_message_to_expression -from tests_python.debug_constants import IS_PY26, IS_PY3K, TEST_GEVENT, IS_CPYTHON +from tests_python.debug_constants import TEST_GEVENT, IS_CPYTHON import sys -from _pydevd_bundle.pydevd_constants import IS_WINDOWS, IS_PY2, IS_PYPY, IS_JYTHON +from _pydevd_bundle.pydevd_constants import IS_WINDOWS, IS_PYPY, IS_JYTHON import pytest import os -import codecs from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id @@ -19,11 +18,8 @@ def test_expression_to_evaluate(): assert _expression_to_evaluate(b' for a in b:\nfoo') == b' for a in b:\nfoo' assert _expression_to_evaluate(b'\tfor a in b:\n\t\tfoo') == b'for a in b:\n\tfoo' - if IS_PY2: - assert _expression_to_evaluate(u' expr') == (codecs.BOM_UTF8 + b'expr') - else: - assert _expression_to_evaluate(u' expr') == u'expr' - assert _expression_to_evaluate(u' for a in expr:\n pass') == u'for a in expr:\npass' + assert _expression_to_evaluate(u' expr') == u'expr' + assert _expression_to_evaluate(u' for a in expr:\n pass') == u'for a in expr:\npass' @pytest.mark.skipif(IS_WINDOWS, reason='Brittle on Windows.') @@ -88,10 +84,7 @@ conftest.py:67: AssertionError error_msg += 'Current main thread not instance of: %s (%s)' % ( threading._MainThread, current_thread.__class__.__mro__,) - try: - from StringIO import StringIO - except: - from io import StringIO + from io import StringIO stream = StringIO() dump_threads(stream=stream) @@ -162,13 +155,11 @@ def test_convert_dap_log_message_to_expression(): 'a (22, 33)} 2' ) - if not IS_PY26: - # Note: set literal not valid for Python 2.6. 
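    # A sketch of the mapping verified below (inferred from the surrounding
    # assertions; '{{'/'}}' are literal braces and '{expr}' becomes a %s
    # interpolation):
    #
    #     convert_dap_log_message_to_expression('a {{1: {1}}}')
    #     # -> "'a %s' % ({1: {1}},)", which prints as: a {1: {1}}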
- assert check_dap_log_message( - 'a {{1: {1}}}', - "'a %s' % ({1: {1}},)", - 'a {1: {1}}' if IS_PY3K else 'a {1: set([1])}', - ) + assert check_dap_log_message( + 'a {{1: {1}}}', + "'a %s' % ({1: {1}},)", + 'a {1: {1}}' + ) # Error condition. assert check_dap_log_message( @@ -180,10 +171,7 @@ def test_convert_dap_log_message_to_expression(): def test_pydevd_log(): from _pydev_bundle import pydev_log - try: - import StringIO as io - except: - import io + import io from _pydev_bundle.pydev_log import log_context stream = io.StringIO() @@ -242,10 +230,7 @@ def test_pydevd_logging_files(tmpdir): import os.path from _pydev_bundle.pydev_log import _LoggingGlobals - try: - import StringIO as io - except: - import io + import io from _pydev_bundle.pydev_log import log_context stream = io.StringIO() @@ -427,13 +412,10 @@ def test_find_main_thread_id(): def test_get_ppid(): from _pydevd_bundle.pydevd_api import PyDevdAPI api = PyDevdAPI() - if IS_PY3K: - # On python 3 we can check that our internal api which is used for Python 2 gives the - # same result as os.getppid. - ppid = os.getppid() - assert api._get_windows_ppid() == ppid - else: - assert api._get_windows_ppid() is not None + # On python 3 we can check that our internal api which is used for Python 2 gives the + # same result as os.getppid. + ppid = os.getppid() + assert api._get_windows_ppid() == ppid def _check_gevent(expect_msg): diff --git a/src/debugpy/_vendored/pydevd/tests_runfiles/test_runfiles.py b/src/debugpy/_vendored/pydevd/tests_runfiles/test_runfiles.py index acfae509..d20b002b 100644 --- a/src/debugpy/_vendored/pydevd/tests_runfiles/test_runfiles.py +++ b/src/debugpy/_vendored/pydevd/tests_runfiles/test_runfiles.py @@ -1,18 +1,15 @@ import os.path import sys -IS_PY26 = sys.version_info[:2] == (2, 6) - IS_JYTHON = sys.platform.find('java') != -1 try: this_file_name = __file__ except NameError: # stupid jython. 
plain old __file__ isnt working for some reason - import test_runfiles #@UnresolvedImport - importing the module itself + import test_runfiles # @UnresolvedImport - importing the module itself this_file_name = test_runfiles.__file__ - desired_runfiles_path = os.path.normpath(os.path.dirname(this_file_name) + "/..") sys.path.insert(0, desired_runfiles_path) @@ -20,13 +17,12 @@ from _pydev_runfiles import pydev_runfiles_unittest from _pydev_runfiles import pydev_runfiles_xml_rpc from _pydevd_bundle import pydevd_io -#remove existing pydev_runfiles from modules (if any), so that we can be sure we have the correct version +# remove existing pydev_runfiles from modules (if any), so that we can be sure we have the correct version if 'pydev_runfiles' in sys.modules: del sys.modules['pydev_runfiles'] if '_pydev_runfiles.pydev_runfiles' in sys.modules: del sys.modules['_pydev_runfiles.pydev_runfiles'] - from _pydev_runfiles import pydev_runfiles import unittest import tempfile @@ -37,7 +33,7 @@ try: except: from sets import Set as set -#this is an early test because it requires the sys.path changed +# this is an early test because it requires the sys.path changed orig_syspath = sys.path a_file = pydev_runfiles.__file__ pydev_runfiles.PydevTestRunner(pydev_runfiles.Configuration(files_or_dirs=[a_file])) @@ -45,9 +41,10 @@ file_dir = os.path.dirname(os.path.dirname(a_file)) assert file_dir in sys.path sys.path = orig_syspath[:] -#remove it so that we leave it ok for other tests +# remove it so that we leave it ok for other tests sys.path.remove(desired_runfiles_path) + class RunfilesTest(unittest.TestCase): def _setup_scenario( @@ -81,7 +78,6 @@ class RunfilesTest(unittest.TestCase): self.file_dir = [os.path.abspath(os.path.join(desired_runfiles_path, 'tests_runfiles/samples'))] self._setup_scenario(self.file_dir, None) - def test_suite_used(self): for suite in self.all_tests + self.filtered_tests: self.assertTrue(isinstance(suite, pydev_runfiles_unittest.PydevTestSuite)) @@ -134,7 +130,6 @@ class RunfilesTest(unittest.TestCase): configuration = pydev_runfiles.parse_cmdline() self.assertEqual(['*__todo', 'test*bar'], configuration.exclude_tests) - def test___adjust_python_path_works_for_directories(self): orig_syspath = sys.path tempdir = tempfile.gettempdir() @@ -142,7 +137,6 @@ class RunfilesTest(unittest.TestCase): self.assertEqual(1, tempdir in sys.path) sys.path = orig_syspath[:] - def test___is_valid_py_file(self): isvalid = self.MyTestRunner._PydevTestRunner__is_valid_py_file self.assertEqual(1, isvalid("test.py")) @@ -201,18 +195,13 @@ class RunfilesTest(unittest.TestCase): for t in tests: total += t.countTestCases() return total - + def test_runfile_imports(self): from _pydev_runfiles import pydev_runfiles_coverage from _pydev_runfiles import pydev_runfiles_parallel_client from _pydev_runfiles import pydev_runfiles_parallel import pytest - if IS_PY26: - with pytest.raises(AssertionError) as e: - from _pydev_runfiles import pydev_runfiles_pytest2 - assert 'Please upgrade pytest' in str(e) - else: - from _pydev_runfiles import pydev_runfiles_pytest2 + from _pydev_runfiles import pydev_runfiles_pytest2 from _pydev_runfiles import pydev_runfiles_unittest from _pydev_runfiles import pydev_runfiles_xml_rpc from _pydev_runfiles import pydev_runfiles @@ -277,7 +266,6 @@ class RunfilesTest(unittest.TestCase): filtered_tests = self.MyTestRunner.filter_tests(self.all_tests) self.assertEqual(self.count_suite(filtered_tests), 0) - self._setup_scenario(self.file_dir, None, exclude_tests=['*a*']) 
filtered_tests = self.MyTestRunner.filter_tests(self.all_tests) self.assertEqual(self.count_suite(filtered_tests), 6) @@ -320,31 +308,30 @@ class RunfilesTest(unittest.TestCase): import sys sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'samples')) notifications = [] + class Server: def __init__(self, notifications): self.notifications = notifications def notifyConnected(self): - #This method is called at the very start (in runfiles.py), and we do not check this here + # This method is called at the very start (in runfiles.py), and we do not check this here raise AssertionError('Should not be called from the run tests.') - def notifyTestsCollected(self, number_of_tests): self.notifications.append(('notifyTestsCollected', number_of_tests)) - def notifyStartTest(self, file, test): pass def notifyTest(self, cond, captured_output, error_contents, file, test, time): try: - #I.e.: when marked as Binary in xml-rpc + # I.e.: when marked as Binary in xml-rpc captured_output = captured_output.data except: pass try: - #I.e.: when marked as Binary in xml-rpc + # I.e.: when marked as Binary in xml-rpc error_contents = error_contents.data except: pass @@ -380,13 +367,13 @@ class RunfilesTest(unittest.TestCase): if sys.version_info[:2] <= (2, 6): # The setUpClass is not supported in Python 2.6 (thus we have no collection error). expected = [ - ('notifyTest', 'fail', '', 'AssertionError: Fail test 2', simple_test, 'SampleTest.test_xxxxxx1'), - ('notifyTest', 'ok', '', '', simple_test2, 'YetAnotherSampleTest.test_abc'), - ('notifyTest', 'ok', '', '', simpleClass_test, 'SetUpClassTest.test_blank'), - ('notifyTest', 'ok', '', '', simpleModule_test, 'SetUpModuleTest.test_blank'), - ('notifyTest', 'ok', '', '', simple_test, 'SampleTest.test_xxxxxx2'), - ('notifyTest', 'ok', 'non unique name ran', '', simple_test, 'SampleTest.test_non_unique_name'), - ('notifyTestRunFinished',), + ('notifyTest', 'fail', '', 'AssertionError: Fail test 2', simple_test, 'SampleTest.test_xxxxxx1'), + ('notifyTest', 'ok', '', '', simple_test2, 'YetAnotherSampleTest.test_abc'), + ('notifyTest', 'ok', '', '', simpleClass_test, 'SetUpClassTest.test_blank'), + ('notifyTest', 'ok', '', '', simpleModule_test, 'SetUpModuleTest.test_blank'), + ('notifyTest', 'ok', '', '', simple_test, 'SampleTest.test_xxxxxx2'), + ('notifyTest', 'ok', 'non unique name ran', '', simple_test, 'SampleTest.test_non_unique_name'), + ('notifyTestRunFinished',), ('notifyTestsCollected', 6) ] else: @@ -397,7 +384,7 @@ class RunfilesTest(unittest.TestCase): ('notifyTest', 'ok', '', '', simple_test, 'SampleTest.test_xxxxxx2'), ('notifyTest', 'ok', '', '', simple_test2, 'YetAnotherSampleTest.test_abc'), ] - + if not IS_JYTHON: if 'samples.simpleClass_test' in str(notifications): expected.append(('notifyTest', 'error', '', 'ValueError: This is an INTENTIONAL value error in setUpClass.', @@ -414,7 +401,7 @@ class RunfilesTest(unittest.TestCase): expected.append(('notifyTest', 'ok', '', '', simpleModule_test, 'SetUpModuleTest.test_blank')) expected.append(('notifyTestRunFinished',)) - + expected.sort() new_notifications = [] for notification in expected: diff --git a/src/debugpy/_vendored/pydevd/third_party/cython_json.py b/src/debugpy/_vendored/pydevd/third_party/cython_json.py deleted file mode 100644 index 38ca21be..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/cython_json.py +++ /dev/null @@ -1,320 +0,0 @@ -import Cython -from Cython.Compiler import Nodes -from Cython.Compiler.Errors import CompileError -import sys -import json -import traceback 
-import os - -sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - -# Note: Cython has some recursive structures in some classes, so, parsing only what we really -# expect may be a bit better (although our recursion check should get that too). -accepted_info = { - 'PyClassDef': set(['name', 'doc', 'body', 'bases', 'decorators', 'pos']) -} - -def node_to_dict(node, _recurse_level=0, memo=None): - nodeid = id(node) # just to be sure it's checked by identity in the memo - if memo is None: - memo = {} - else: - if nodeid in memo: - # i.e.: prevent Nodes recursion. - return None - memo[nodeid] = 1 - try: - _recurse_level += 1 - assert _recurse_level < 500, "It seems we are recursing..." - - node_name = node.__class__.__name__ - # print((' ' * _recurse_level) + node_name) - if node_name.endswith("Node"): - node_name = node_name[:-4] - data = {"__node__": node_name} - if _recurse_level == 1: - data['__version__'] = Cython.__version__ - - - dct = node.__dict__ - accepted = accepted_info.get(node_name) - if accepted is None: - items = [(key, value) for key, value in dct.items()] - else: - # for key in dct.keys(): - # if key not in accepted: - # print('Skipped: %s' % (key,)) - items = [(key, dct[key]) for key in accepted] - - - for attr_name, attr in items: - if attr_name in ("pos", "position"): - data["line"] = attr[1] - data["col"] = attr[2] - continue - - if isinstance(attr, Nodes.Node): - data[attr_name] = node_to_dict(attr, _recurse_level, memo) - - elif isinstance(attr, (list, tuple)): - lst = [] - - for x in attr: - if isinstance(x, Nodes.Node): - lst.append(node_to_dict(x, _recurse_level, memo)) - - elif isinstance(x, (bytes, str)): - lst.append(x) - - elif hasattr(x, 'encode'): - lst.append(x.encode('utf-8', 'replace')) - - elif isinstance(x, (list, tuple)): - tup = [] - - for y in x: - if isinstance(y, (str, bytes)): - tup.append(y) - elif isinstance(y, Nodes.Node): - tup.append(node_to_dict(y, _recurse_level, memo)) - - lst.append(tup) - - data[attr_name] = lst - - else: - data[attr_name] = str(attr) - finally: - memo.pop(nodeid, None) - - return data - - -def source_to_dict(source, name=None): - from Cython.Compiler.TreeFragment import parse_from_strings, StatListNode - # Right now we don't collect errors, but leave the API compatible already. - collected_errors = [] - - try: - - # Note: we don't use TreeFragment because it formats the code removing empty lines - # (which ends up creating an AST with wrong lines). 
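        # Rough shape of the dict that source_to_dict returns below (sketch; the
        # field values are illustrative, not actual output):
        #
        #     {'ast': {'__node__': 'StatList', '__version__': '<Cython version>',
        #              'line': 1, 'col': 0, 'stats': [...]},
        #      'errors': []}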
- if not name: - name = "(tree fragment)" - - mod = t = parse_from_strings(name, source) - t = t.body # Make sure a StatListNode is at the top - if not isinstance(t, StatListNode): - t = StatListNode(pos=mod.pos, stats=[t]) - root = t - except CompileError as e: - return { - 'ast': None, - 'errors': [node_to_dict(e)] - } - except BaseException as e: - as_dict = { - 'ast': None, - 'errors': [{ - '__node__': 'CompileError', 'line': 1, 'col': 1, 'message_only': str(e) - }] - } - return as_dict - - result = {'ast': node_to_dict(root), 'errors': [node_to_dict(e) for e in collected_errors]} - return result - - -from _pydev_bundle import pydev_localhost -HOST = pydev_localhost.get_localhost() # Symbolic name meaning the local host -IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3 - - -def dbg(s): - sys.stderr.write('%s\n' % (s,)) -# f = open('c:/temp/test.txt', 'a') -# print_ >> f, s -# f.close() - - -SERVER_NAME = 'CythonJson' - - -class Exit(Exception): - pass - - -class CythonJsonServer(object): - - def __init__(self, port): - self.ended = False - self._buffer = b'' - self.port = port - self.socket = None # socket to send messages. - self.exit_process_on_kill = True - - def emulated_sendall(self, msg): - MSGLEN = 1024 * 20 - - totalsent = 0 - while totalsent < MSGLEN: - sent = self.socket.send(msg[totalsent:]) - if sent == 0: - return - totalsent = totalsent + sent - - def send(self, msg): - if not isinstance(msg, bytes): - msg = msg.encode('utf-8', 'replace') - - if not hasattr(self.socket, 'sendall'): - # Older versions (jython 2.1) - self.emulated_sendall(msg) - else: - if IS_PYTHON_3_ONWARDS: - self.socket.sendall(msg) - else: - self.socket.sendall(msg) - - def connect_to_server(self): - from _pydev_imps._pydev_saved_modules import socket - - self.socket = s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - s.connect((HOST, self.port)) - except: - sys.stderr.write('Error on connect_to_server with parameters: host: %s port: %s\n' % (HOST, self.port)) - raise - - def _read(self, size): - while True: - buffer_len = len(self._buffer) - if buffer_len == size: - ret = self._buffer - self._buffer = b'' - return ret - - if buffer_len > size: - ret = self._buffer[:size] - self._buffer = self._buffer[size:] - return ret - - try: - r = self.socket.recv(max(size - buffer_len, 1024)) - except OSError: - return b'' - if not r: - return b'' - self._buffer += r - - def _read_line(self): - while True: - i = self._buffer.find(b'\n') - if i != -1: - i += 1 # Add the newline to the return - ret = self._buffer[:i] - self._buffer = self._buffer[i:] - return ret - else: - try: - r = self.socket.recv(1024) - except OSError: - return b'' - if not r: - return b'' - self._buffer += r - - def process_command(self, json_contents): - try: - as_dict = json.loads(json_contents) - if as_dict['command'] == 'cython_to_json_ast': - contents = as_dict['contents'] - as_dict = source_to_dict(contents) - result = as_dict - else: - result = {'command': '', 'received': json_contents} - except: - try: - from StringIO import StringIO - except: - from io import StringIO - s = StringIO() - traceback.print_exc(file=s) - result = {'command': '', 'error': s.getvalue()} - - return json.dumps(result) - - def run(self): - # Echo server program - try: - dbg(SERVER_NAME + ' connecting to java server on %s (%s)' % (HOST, self.port)) - # after being connected, create a socket as a client. 
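            # The loop below speaks a Content-Length framed protocol; one request
            # looks roughly like this (sketch):
            #
            #     Content-Length: 52\r\n
            #     \r\n
            #     {"command": "cython_to_json_ast", "contents": "..."}
            #
            # and the reply is written back with the same framing.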
- self.connect_to_server() - - dbg(SERVER_NAME + ' Connected to java server') - - content_len = -1 - while True: - dbg('Will read line...') - line = self._read_line() - dbg('Read: %s' % (line,)) - if not line: - raise Exit() - - if line.startswith(b'Content-Length:'): - content_len = int(line.strip().split(b':', 1)[1]) - dbg('Found content len: %s' % (content_len,)) - continue - - if content_len != -1: - # If we previously received a content length, read until a '\r\n'. - if line == b'\r\n': - dbg('Will read contents (%s)...' % (content_len,)) - json_contents = self._read(content_len) - dbg('Read: %s' % (json_contents,)) - content_len = -1 - - if len(json_contents) == 0: - raise Exit() - - # We just received a json message, let's process it. - dbg('Will process...') - output = self.process_command(json_contents) - if not isinstance(output, bytes): - output = output.encode('utf-8', 'replace') - - self.send('Content-Length: %s\r\n\r\n' % (len(output),)) - self.send(output) - - continue - - except Exit: - sys.exit(0) - except: - traceback.print_exc() - raise - - -if __name__ == '__main__': - args = sys.argv[1:] - if args == ['-']: - # Read from stdin/dump to stdout - if sys.version_info < (3,): - stdin_get_value = sys.stdin.read - else: - stdin_get_value = sys.stdin.buffer.read - - source = stdin_get_value() - # After reading, convert to unicode (use the stdout encoding) - source = source.decode(sys.stdout.encoding, 'replace') - as_dict = source_to_dict(source) - print(json.dumps(as_dict, indent=4)) - sys.stdout.flush() - else: - # start as server - port = int(sys.argv[1]) # this is from where we want to receive messages. - - t = CythonJsonServer(port) - dbg(SERVER_NAME + ' will start') - t.run() - diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py deleted file mode 100644 index 3063d1ed..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -"""__init__.py. - -Defines the isort module to include the SortImports utility class as well as any defined settings. - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -from . 
diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py
deleted file mode 100644
index 3063d1ed..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""__init__.py.
-
-Defines the isort module to include the SortImports utility class as well as any defined settings.
-
-Copyright (C) 2013 Timothy Edmund Crosley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
-to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or
-substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-"""
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-from . import settings
-from .isort import SortImports
-
-__version__ = "4.2.15"
diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__main__.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__main__.py
deleted file mode 100644
index 94b1d057..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/__main__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from isort.main import main
-
-main()
diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/hooks.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/hooks.py
deleted file mode 100644
index 15b6d408..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/hooks.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""isort.py.
-
-Defines a git hook to allow pre-commit warnings and errors about import order.
-
-usage:
-    exit_code = git_hook(strict=True)
-
-Copyright (C) 2015 Helen Sherwood-Taylor
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
-to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or
-substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-"""
-import subprocess
-
-from isort import SortImports
-
-
-def get_output(command):
-    """
-    Run a command and return raw output
-
-    :param str command: the command to run
-    :returns: the stdout output of the command
-    """
-    return subprocess.check_output(command.split())
-
-
-def get_lines(command):
-    """
-    Run a command and return lines of output
-
-    :param str command: the command to run
-    :returns: list of whitespace-stripped lines output by command
-    """
-    stdout = get_output(command)
-    return [line.strip().decode('utf-8') for line in stdout.splitlines()]
-
-
-def git_hook(strict=False):
-    """
-    Git pre-commit hook to check staged files for isort errors
-
-    :param bool strict - if True, return number of errors on exit,
-        causing the hook to fail. If False, return zero so it will
-        just act as a warning.
-
-    :return number of errors if in strict mode, 0 otherwise.
- """ - - # Get list of files modified and staged - diff_cmd = "git diff-index --cached --name-only --diff-filter=ACMRTUXB HEAD" - files_modified = get_lines(diff_cmd) - - errors = 0 - for filename in files_modified: - if filename.endswith('.py'): - # Get the staged contents of the file - staged_cmd = "git show :%s" % filename - staged_contents = get_output(staged_cmd) - - sort = SortImports( - file_path=filename, - file_contents=staged_contents.decode(), - check=True - ) - - if sort.incorrectly_sorted: - errors += 1 - - return errors if strict else 0 diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py deleted file mode 100644 index cecd5af9..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py +++ /dev/null @@ -1,969 +0,0 @@ -"""isort.py. - -Exposes a simple library to sort through imports within Python code - -usage: - SortImports(file_name) -or: - sorted = SortImports(file_contents=file_contents).output - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import copy -import io -import itertools -import os -import re -import sys -from collections import namedtuple -from datetime import datetime -from difflib import unified_diff -from fnmatch import fnmatch -from glob import glob - -from . import settings -from .natural import nsorted -from .pie_slice import OrderedDict, OrderedSet, input, itemsview - -KNOWN_SECTION_MAPPING = { - 'STDLIB': 'STANDARD_LIBRARY', - 'FUTURE': 'FUTURE_LIBRARY', - 'FIRSTPARTY': 'FIRST_PARTY', - 'THIRDPARTY': 'THIRD_PARTY', -} - - -class SortImports(object): - incorrectly_sorted = False - skipped = False - - def __init__(self, file_path=None, file_contents=None, write_to_stdout=False, check=False, - show_diff=False, settings_path=None, ask_to_apply=False, **setting_overrides): - if not settings_path and file_path: - settings_path = os.path.dirname(os.path.abspath(file_path)) - settings_path = settings_path or os.getcwd() - - self.config = settings.from_path(settings_path).copy() - for key, value in itemsview(setting_overrides): - access_key = key.replace('not_', '').lower() - # The sections config needs to retain order and can't be converted to a set. 
diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py
deleted file mode 100644
index cecd5af9..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/isort.py
+++ /dev/null
@@ -1,969 +0,0 @@
-"""isort.py.
-
-Exposes a simple library to sort through imports within Python code
-
-usage:
-    SortImports(file_name)
-or:
-    sorted = SortImports(file_contents=file_contents).output
-
-Copyright (C) 2013 Timothy Edmund Crosley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
-to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or
-substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-"""
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import copy
-import io
-import itertools
-import os
-import re
-import sys
-from collections import namedtuple
-from datetime import datetime
-from difflib import unified_diff
-from fnmatch import fnmatch
-from glob import glob
-
-from . import settings
-from .natural import nsorted
-from .pie_slice import OrderedDict, OrderedSet, input, itemsview
-
-KNOWN_SECTION_MAPPING = {
-    'STDLIB': 'STANDARD_LIBRARY',
-    'FUTURE': 'FUTURE_LIBRARY',
-    'FIRSTPARTY': 'FIRST_PARTY',
-    'THIRDPARTY': 'THIRD_PARTY',
-}
-
-
-class SortImports(object):
-    incorrectly_sorted = False
-    skipped = False
-
-    def __init__(self, file_path=None, file_contents=None, write_to_stdout=False, check=False,
-                 show_diff=False, settings_path=None, ask_to_apply=False, **setting_overrides):
-        if not settings_path and file_path:
-            settings_path = os.path.dirname(os.path.abspath(file_path))
-        settings_path = settings_path or os.getcwd()
-
-        self.config = settings.from_path(settings_path).copy()
-        for key, value in itemsview(setting_overrides):
-            access_key = key.replace('not_', '').lower()
-            # The sections config needs to retain order and can't be converted to a set.
-            if access_key != 'sections' and type(self.config.get(access_key)) in (list, tuple):
-                if key.startswith('not_'):
-                    self.config[access_key] = list(set(self.config[access_key]).difference(value))
-                else:
-                    self.config[access_key] = list(set(self.config[access_key]).union(value))
-            else:
-                self.config[key] = value
-
-        if self.config['force_alphabetical_sort']:
-            self.config.update({'force_alphabetical_sort_within_sections': True,
-                                'no_sections': True,
-                                'lines_between_types': 1,
-                                'from_first': True})
-
-        indent = str(self.config['indent'])
-        if indent.isdigit():
-            indent = " " * int(indent)
-        else:
-            indent = indent.strip("'").strip('"')
-            if indent.lower() == "tab":
-                indent = "\t"
-        self.config['indent'] = indent
-
-        self.place_imports = {}
-        self.import_placements = {}
-        self.remove_imports = [self._format_simplified(removal) for removal in self.config['remove_imports']]
-        self.add_imports = [self._format_natural(addition) for addition in self.config['add_imports']]
-        self._section_comments = ["# " + value for key, value in itemsview(self.config) if
-                                  key.startswith('import_heading') and value]
-
-        self.file_encoding = 'utf-8'
-        file_name = file_path
-        self.file_path = file_path or ""
-        if file_path:
-            file_path = os.path.abspath(file_path)
-            if settings.should_skip(file_path, self.config):
-                self.skipped = True
-                if self.config['verbose']:
-                    print("WARNING: {0} was skipped as it's listed in 'skip' setting"
-                          " or matches a glob in 'skip_glob' setting".format(file_path))
-                file_contents = None
-            elif not file_contents:
-                self.file_path = file_path
-                self.file_encoding = coding_check(file_path)
-                with io.open(file_path, encoding=self.file_encoding) as file_to_import_sort:
-                    file_contents = file_to_import_sort.read()
-
-        if file_contents is None or ("isort:" + "skip_file") in file_contents:
-            return
-
-        self.in_lines = file_contents.split("\n")
-        self.original_length = len(self.in_lines)
-        if (self.original_length > 1 or self.in_lines[:1] not in ([], [""])) or self.config['force_adds']:
-            for add_import in self.add_imports:
-                self.in_lines.append(add_import)
-        self.number_of_lines = len(self.in_lines)
-
-        self.out_lines = []
-        self.comments = {'from': {}, 'straight': {}, 'nested': {}, 'above': {'straight': {}, 'from': {}}}
-        self.imports = OrderedDict()
-        self.as_map = {}
-
-        section_names = self.config['sections']
-        self.sections = namedtuple('Sections', section_names)(*[name for name in section_names])
-        for section in itertools.chain(self.sections, self.config['forced_separate']):
-            self.imports[section] = {'straight': OrderedSet(), 'from': OrderedDict()}
-
-        self.known_patterns = []
-        for placement in reversed(self.sections):
-            known_placement = KNOWN_SECTION_MAPPING.get(placement, placement)
-            config_key = 'known_{0}'.format(known_placement.lower())
-            known_patterns = self.config.get(config_key, [])
-            for known_pattern in known_patterns:
-                self.known_patterns.append((re.compile('^' + known_pattern.replace('*', '.*').replace('?', '.?') + '$'),
-                                            placement))
-
-        self.index = 0
-        self.import_index = -1
-        self._first_comment_index_start = -1
-        self._first_comment_index_end = -1
-        self._parse()
-        if self.import_index != -1:
-            self._add_formatted_imports()
-
-        self.length_change = len(self.out_lines) - self.original_length
-        while self.out_lines and self.out_lines[-1].strip() == "":
-            self.out_lines.pop(-1)
-        self.out_lines.append("")
-
-        self.output = "\n".join(self.out_lines)
-        if self.config['atomic']:
-            try:
-                compile(self._strip_top_comments(self.out_lines), self.file_path, 'exec', 0, 1)
-            except SyntaxError:
-                self.output = file_contents
-                self.incorrectly_sorted = True
-                try:
-                    compile(self._strip_top_comments(self.in_lines), self.file_path, 'exec', 0, 1)
-                    print("ERROR: {0} isort would have introduced syntax errors, please report to the project!". \
-                        format(self.file_path))
-                except SyntaxError:
-                    print("ERROR: {0} File contains syntax errors.".format(self.file_path))
-
-                return
-        if check:
-            check_output = self.output
-            check_against = file_contents
-            if self.config['ignore_whitespace']:
-                check_output = check_output.replace("\n", "").replace(" ", "")
-                check_against = check_against.replace("\n", "").replace(" ", "")
-
-            if check_output == check_against:
-                if self.config['verbose']:
-                    print("SUCCESS: {0} Everything Looks Good!".format(self.file_path))
-                return
-
-            print("ERROR: {0} Imports are incorrectly sorted.".format(self.file_path))
-            self.incorrectly_sorted = True
-        if show_diff or self.config['show_diff']:
-            self._show_diff(file_contents)
-        elif write_to_stdout:
-            sys.stdout.write(self.output)
-        elif file_name and not check:
-            if ask_to_apply:
-                if self.output == file_contents:
-                    return
-                self._show_diff(file_contents)
-                answer = None
-                while answer not in ('yes', 'y', 'no', 'n', 'quit', 'q'):
-                    answer = input("Apply suggested changes to '{0}' [y/n/q]?".format(self.file_path)).lower()
-                    if answer in ('no', 'n'):
-                        return
-                    if answer in ('quit', 'q'):
-                        sys.exit(1)
-            with io.open(self.file_path, encoding=self.file_encoding, mode='w') as output_file:
-                output_file.write(self.output)
-
-    def _show_diff(self, file_contents):
-        for line in unified_diff(
-            file_contents.splitlines(1),
-            self.output.splitlines(1),
-            fromfile=self.file_path + ':before',
-            tofile=self.file_path + ':after',
-            fromfiledate=str(datetime.fromtimestamp(os.path.getmtime(self.file_path))
-                             if self.file_path else datetime.now()),
-            tofiledate=str(datetime.now())
-        ):
-            sys.stdout.write(line)
-
-    @staticmethod
-    def _strip_top_comments(lines):
-        """Strips # comments that exist at the top of the given lines"""
-        lines = copy.copy(lines)
-        while lines and lines[0].startswith("#"):
-            lines = lines[1:]
-        return "\n".join(lines)
-
-    def place_module(self, module_name):
-        """Tries to determine if a module is a python std import, third party import, or project code:
-
-        if it can't determine - it assumes it is project code
-
-        """
-        for forced_separate in self.config['forced_separate']:
-            # Ensure all forced_separate patterns will match to end of string
-            path_glob = forced_separate
-            if not forced_separate.endswith('*'):
-                path_glob = '%s*' % forced_separate
-
-            if fnmatch(module_name, path_glob) or fnmatch(module_name, '.' + path_glob):
-                return forced_separate
-
-        if module_name.startswith("."):
-            return self.sections.LOCALFOLDER
-
-        # Try to find most specific placement instruction match (if any)
-        parts = module_name.split('.')
-        module_names_to_check = ['.'.join(parts[:first_k]) for first_k in range(len(parts), 0, -1)]
-        for module_name_to_check in module_names_to_check:
-            for pattern, placement in self.known_patterns:
-                if pattern.match(module_name_to_check):
-                    return placement
-
-        # Use a copy of sys.path to avoid any unintended modifications
-        # to it - e.g. `+=` used below will change paths in place and
-        # if not copied, consequently sys.path, which will grow unbounded
-        # with duplicates on every call to this method.
-        paths = list(sys.path)
-        virtual_env = self.config.get('virtual_env') or os.environ.get('VIRTUAL_ENV')
-        virtual_env_src = False
-        if virtual_env:
-            paths += [path for path in glob('{0}/lib/python*/site-packages'.format(virtual_env))
-                      if path not in paths]
-            paths += [path for path in glob('{0}/src/*'.format(virtual_env)) if os.path.isdir(path)]
-            virtual_env_src = '{0}/src/'.format(virtual_env)
-
-        # handle case-insensitive paths on windows
-        stdlib_lib_prefix = os.path.normcase(get_stdlib_path())
-
-        for prefix in paths:
-            module_path = "/".join((prefix, module_name.replace(".", "/")))
-            package_path = "/".join((prefix, module_name.split(".")[0]))
-            is_module = (exists_case_sensitive(module_path + ".py") or
-                         exists_case_sensitive(module_path + ".so"))
-            is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path)
-            if is_module or is_package:
-                if ('site-packages' in prefix or 'dist-packages' in prefix or
-                        (virtual_env and virtual_env_src in prefix)):
-                    return self.sections.THIRDPARTY
-                elif os.path.normcase(prefix).startswith(stdlib_lib_prefix):
-                    return self.sections.STDLIB
-                else:
-                    return self.config['default_section']
-
-        return self.config['default_section']
-
-    def _get_line(self):
-        """Returns the current line from the file while incrementing the index."""
-        line = self.in_lines[self.index]
-        self.index += 1
-        return line
-
-    @staticmethod
-    def _import_type(line):
-        """If the current line is an import line it will return its type (from or straight)"""
-        if "isort:skip" in line:
-            return
-        elif line.startswith('import '):
-            return "straight"
-        elif line.startswith('from '):
-            return "from"
-
-    def _at_end(self):
-        """returns True if we are at the end of the file."""
-        return self.index == self.number_of_lines
-
-    @staticmethod
-    def _module_key(module_name, config, sub_imports=False, ignore_case=False):
-        prefix = ""
-        if ignore_case:
-            module_name = str(module_name).lower()
-        else:
-            module_name = str(module_name)
-
-        if sub_imports and config['order_by_type']:
-            if module_name.isupper() and len(module_name) > 1:
-                prefix = "A"
-            elif module_name[0:1].isupper():
-                prefix = "B"
-            else:
-                prefix = "C"
-        module_name = module_name.lower()
-        return "{0}{1}{2}".format(module_name in config['force_to_top'] and "A" or "B", prefix,
-                                  config['length_sort'] and (str(len(module_name)) + ":" + module_name) or module_name)
-
-    def _add_comments(self, comments, original_string=""):
-        """
-            Returns a string with comments added
-        """
-        return comments and "{0} # {1}".format(self._strip_comments(original_string)[0],
-                                               "; ".join(comments)) or original_string
-
-    def _wrap(self, line):
-        """
-            Returns an import wrapped to the specified line-length, if possible.
- """ - wrap_mode = self.config['multi_line_output'] - if len(line) > self.config['line_length'] and wrap_mode != settings.WrapModes.NOQA: - for splitter in ("import", ".", "as"): - exp = r"\b" + re.escape(splitter) + r"\b" - if re.search(exp, line) and not line.strip().startswith(splitter): - line_parts = re.split(exp, line) - next_line = [] - while (len(line) + 2) > (self.config['wrap_length'] or self.config['line_length']) and line_parts: - next_line.append(line_parts.pop()) - line = splitter.join(line_parts) - if not line: - line = next_line.pop() - - cont_line = self._wrap(self.config['indent'] + splitter.join(next_line).lstrip()) - if self.config['use_parentheses']: - output = "{0}{1} (\n{2}{3}{4})".format( - line, splitter, cont_line, - "," if self.config['include_trailing_comma'] else "", - "\n" if wrap_mode in ( - settings.WrapModes.VERTICAL_HANGING_INDENT, - settings.WrapModes.VERTICAL_GRID_GROUPED, - ) else "") - lines = output.split('\n') - if ' #' in lines[-1] and lines[-1].endswith(')'): - line, comment = lines[-1].split(' #', 1) - lines[-1] = line + ') #' + comment[:-1] - return '\n'.join(lines) - return "{0}{1} \\\n{2}".format(line, splitter, cont_line) - elif len(line) > self.config['line_length'] and wrap_mode == settings.WrapModes.NOQA: - if "# NOQA" not in line: - return "{0} # NOQA".format(line) - - return line - - def _add_straight_imports(self, straight_modules, section, section_output): - for module in straight_modules: - if module in self.remove_imports: - continue - - if module in self.as_map: - import_definition = "import {0} as {1}".format(module, self.as_map[module]) - else: - import_definition = "import {0}".format(module) - - comments_above = self.comments['above']['straight'].pop(module, None) - if comments_above: - section_output.extend(comments_above) - section_output.append(self._add_comments(self.comments['straight'].get(module), import_definition)) - - def _add_from_imports(self, from_modules, section, section_output, ignore_case): - for module in from_modules: - if module in self.remove_imports: - continue - - import_start = "from {0} import ".format(module) - from_imports = self.imports[section]['from'][module] - from_imports = nsorted(from_imports, key=lambda key: self._module_key(key, self.config, True, ignore_case)) - if self.remove_imports: - from_imports = [line for line in from_imports if not "{0}.{1}".format(module, line) in - self.remove_imports] - - for from_import in copy.copy(from_imports): - submodule = module + "." 
-                import_as = self.as_map.get(submodule, False)
-                if import_as:
-                    import_definition = "{0} as {1}".format(from_import, import_as)
-                    if self.config['combine_as_imports'] and not ("*" in from_imports and
-                                                                  self.config['combine_star']):
-                        from_imports[from_imports.index(from_import)] = import_definition
-                    else:
-                        import_statement = import_start + import_definition
-                        force_grid_wrap = self.config['force_grid_wrap']
-                        comments = self.comments['straight'].get(submodule)
-                        import_statement = self._add_comments(comments, self._wrap(import_statement))
-                        from_imports.remove(from_import)
-                        section_output.append(import_statement)
-
-
-            if from_imports:
-                comments = self.comments['from'].pop(module, ())
-                if "*" in from_imports and self.config['combine_star']:
-                    import_statement = self._wrap(self._add_comments(comments, "{0}*".format(import_start)))
-                elif self.config['force_single_line']:
-                    import_statements = []
-                    for from_import in from_imports:
-                        single_import_line = self._add_comments(comments, import_start + from_import)
-                        comment = self.comments['nested'].get(module, {}).pop(from_import, None)
-                        if comment:
-                            single_import_line += "{0} {1}".format(comments and ";" or " #", comment)
-                        import_statements.append(self._wrap(single_import_line))
-                        comments = None
-                    import_statement = "\n".join(import_statements)
-                else:
-                    star_import = False
-                    if "*" in from_imports:
-                        section_output.append(self._add_comments(comments, "{0}*".format(import_start)))
-                        from_imports.remove('*')
-                        star_import = True
-                        comments = None
-
-                    for from_import in copy.copy(from_imports):
-                        comment = self.comments['nested'].get(module, {}).pop(from_import, None)
-                        if comment:
-                            single_import_line = self._add_comments(comments, import_start + from_import)
-                            single_import_line += "{0} {1}".format(comments and ";" or " #", comment)
-                            above_comments = self.comments['above']['from'].pop(module, None)
-                            if above_comments:
-                                section_output.extend(above_comments)
-                            section_output.append(self._wrap(single_import_line))
-                            from_imports.remove(from_import)
-                            comments = None
-
-                    if star_import:
-                        import_statement = import_start + (", ").join(from_imports)
-                    else:
-                        import_statement = self._add_comments(comments, import_start + (", ").join(from_imports))
-                    if not from_imports:
-                        import_statement = ""
-
-                    do_multiline_reformat = False
-
-                    force_grid_wrap = self.config['force_grid_wrap']
-                    if force_grid_wrap and len(from_imports) >= force_grid_wrap:
-                        do_multiline_reformat = True
-
-                    if len(import_statement) > self.config['line_length'] and len(from_imports) > 1:
-                        do_multiline_reformat = True
-
-                    # If line too long AND have imports AND we are NOT using GRID or VERTICAL wrap modes
-                    if (len(import_statement) > self.config['line_length'] and len(from_imports) > 0 and
-                            self.config['multi_line_output'] not in (1, 0)):
-                        do_multiline_reformat = True
-
-                    if do_multiline_reformat:
-                        import_statement = self._multi_line_reformat(import_start, from_imports, comments)
-                    if not do_multiline_reformat and len(import_statement) > self.config['line_length']:
-                        import_statement = self._wrap(import_statement)
-
-                if import_statement:
-                    above_comments = self.comments['above']['from'].pop(module, None)
-                    if above_comments:
-                        section_output.extend(above_comments)
-                    section_output.append(import_statement)
-
-    def _multi_line_reformat(self, import_start, from_imports, comments):
-        output_mode = settings.WrapModes._fields[self.config['multi_line_output']].lower()
-        formatter = getattr(self, "_output_" + output_mode, self._output_grid)
-        dynamic_indent = " " * (len(import_start) + 1)
" " * (len(import_start) + 1) - indent = self.config['indent'] - line_length = self.config['wrap_length'] or self.config['line_length'] - import_statement = formatter(import_start, copy.copy(from_imports), - dynamic_indent, indent, line_length, comments) - if self.config['balanced_wrapping']: - lines = import_statement.split("\n") - line_count = len(lines) - if len(lines) > 1: - minimum_length = min([len(line) for line in lines[:-1]]) - else: - minimum_length = 0 - new_import_statement = import_statement - while (len(lines[-1]) < minimum_length and - len(lines) == line_count and line_length > 10): - import_statement = new_import_statement - line_length -= 1 - new_import_statement = formatter(import_start, copy.copy(from_imports), - dynamic_indent, indent, line_length, comments) - lines = new_import_statement.split("\n") - if import_statement.count('\n') == 0: - return self._wrap(import_statement) - return import_statement - - def _add_formatted_imports(self): - """Adds the imports back to the file. - - (at the index of the first import) sorted alphabetically and split between groups - - """ - sort_ignore_case = self.config['force_alphabetical_sort_within_sections'] - sections = itertools.chain(self.sections, self.config['forced_separate']) - - if self.config['no_sections']: - self.imports['no_sections'] = {'straight': [], 'from': {}} - for section in sections: - self.imports['no_sections']['straight'].extend(self.imports[section].get('straight', [])) - self.imports['no_sections']['from'].update(self.imports[section].get('from', {})) - sections = ('no_sections', ) - - output = [] - for section in sections: - straight_modules = self.imports[section]['straight'] - straight_modules = nsorted(straight_modules, key=lambda key: self._module_key(key, self.config)) - from_modules = self.imports[section]['from'] - from_modules = nsorted(from_modules, key=lambda key: self._module_key(key, self.config)) - - section_output = [] - if self.config['from_first']: - self._add_from_imports(from_modules, section, section_output, sort_ignore_case) - if self.config['lines_between_types'] and from_modules and straight_modules: - section_output.extend([''] * self.config['lines_between_types']) - self._add_straight_imports(straight_modules, section, section_output) - else: - self._add_straight_imports(straight_modules, section, section_output) - if self.config['lines_between_types'] and from_modules and straight_modules: - section_output.extend([''] * self.config['lines_between_types']) - self._add_from_imports(from_modules, section, section_output, sort_ignore_case) - - if self.config['force_sort_within_sections']: - def by_module(line): - section = 'B' - if line.startswith('#'): - return 'AA' - - line = re.sub('^from ', '', line) - line = re.sub('^import ', '', line) - if line.split(' ')[0] in self.config['force_to_top']: - section = 'A' - if not self.config['order_by_type']: - line = line.lower() - return '{0}{1}'.format(section, line) - section_output = nsorted(section_output, key=by_module) - - if section_output: - section_name = section - if section_name in self.place_imports: - self.place_imports[section_name] = section_output - continue - - section_title = self.config.get('import_heading_' + str(section_name).lower(), '') - if section_title: - section_comment = "# {0}".format(section_title) - if not section_comment in self.out_lines[0:1] and not section_comment in self.in_lines[0:1]: - section_output.insert(0, section_comment) - output += section_output + ([''] * self.config['lines_between_sections']) - - 
-        while [character.strip() for character in output[-1:]] == [""]:
-            output.pop()
-
-        output_at = 0
-        if self.import_index < self.original_length:
-            output_at = self.import_index
-        elif self._first_comment_index_end != -1 and self._first_comment_index_start <= 2:
-            output_at = self._first_comment_index_end
-        self.out_lines[output_at:0] = output
-
-        imports_tail = output_at + len(output)
-        while [character.strip() for character in self.out_lines[imports_tail: imports_tail + 1]] == [""]:
-            self.out_lines.pop(imports_tail)
-
-        if len(self.out_lines) > imports_tail:
-            next_construct = ""
-            self._in_quote = False
-            tail = self.out_lines[imports_tail:]
-            for index, line in enumerate(tail):
-                if not self._skip_line(line) and line.strip():
-                    if line.strip().startswith("#") and len(tail) > (index + 1) and tail[index + 1].strip():
-                        continue
-                    next_construct = line
-                    break
-
-            if self.config['lines_after_imports'] != -1:
-                self.out_lines[imports_tail:0] = ["" for line in range(self.config['lines_after_imports'])]
-            elif next_construct.startswith("def") or next_construct.startswith("class") or \
-                    next_construct.startswith("@") or next_construct.startswith("async def"):
-                self.out_lines[imports_tail:0] = ["", ""]
-            else:
-                self.out_lines[imports_tail:0] = [""]
-
-        if self.place_imports:
-            new_out_lines = []
-            for index, line in enumerate(self.out_lines):
-                new_out_lines.append(line)
-                if line in self.import_placements:
-                    new_out_lines.extend(self.place_imports[self.import_placements[line]])
-                    if len(self.out_lines) <= index or self.out_lines[index + 1].strip() != "":
-                        new_out_lines.append("")
-            self.out_lines = new_out_lines
-
-    def _output_grid(self, statement, imports, white_space, indent, line_length, comments):
-        statement += "(" + imports.pop(0)
-        while imports:
-            next_import = imports.pop(0)
-            next_statement = self._add_comments(comments, statement + ", " + next_import)
-            if len(next_statement.split("\n")[-1]) + 1 > line_length:
-                lines = ['{0}{1}'.format(white_space, next_import.split(" ")[0])]
-                for part in next_import.split(" ")[1:]:
-                    new_line = '{0} {1}'.format(lines[-1], part)
-                    if len(new_line) + 1 > line_length:
-                        lines.append('{0}{1}'.format(white_space, part))
-                    else:
-                        lines[-1] = new_line
-                next_import = '\n'.join(lines)
-                statement = (self._add_comments(comments, "{0},".format(statement)) +
-                             "\n{0}".format(next_import))
-                comments = None
-            else:
-                statement += ", " + next_import
-        return statement + ("," if self.config['include_trailing_comma'] else "") + ")"
-
-    def _output_vertical(self, statement, imports, white_space, indent, line_length, comments):
-        first_import = self._add_comments(comments, imports.pop(0) + ",") + "\n" + white_space
-        return "{0}({1}{2}{3})".format(
-            statement,
-            first_import,
-            (",\n" + white_space).join(imports),
-            "," if self.config['include_trailing_comma'] else "",
-        )
-
-    def _output_hanging_indent(self, statement, imports, white_space, indent, line_length, comments):
-        statement += imports.pop(0)
-        while imports:
-            next_import = imports.pop(0)
-            next_statement = self._add_comments(comments, statement + ", " + next_import)
-            if len(next_statement.split("\n")[-1]) + 3 > line_length:
-                next_statement = (self._add_comments(comments, "{0}, \\".format(statement)) +
-                                  "\n{0}{1}".format(indent, next_import))
-                comments = None
-            statement = next_statement
-        return statement
-
-    def _output_vertical_hanging_indent(self, statement, imports, white_space, indent, line_length, comments):
-        return "{0}({1}\n{2}{3}{4}\n)".format(
-            statement,
-            self._add_comments(comments),
-            indent,
-            (",\n" + indent).join(imports),
-            "," if self.config['include_trailing_comma'] else "",
-        )
-
-    def _output_vertical_grid_common(self, statement, imports, white_space, indent, line_length, comments):
-        statement += self._add_comments(comments, "(") + "\n" + indent + imports.pop(0)
-        while imports:
-            next_import = imports.pop(0)
-            next_statement = "{0}, {1}".format(statement, next_import)
-            if len(next_statement.split("\n")[-1]) + 1 > line_length:
-                next_statement = "{0},\n{1}{2}".format(statement, indent, next_import)
-            statement = next_statement
-        if self.config['include_trailing_comma']:
-            statement += ','
-        return statement
-
-    def _output_vertical_grid(self, statement, imports, white_space, indent, line_length, comments):
-        return self._output_vertical_grid_common(statement, imports, white_space, indent, line_length, comments) + ")"
-
-    def _output_vertical_grid_grouped(self, statement, imports, white_space, indent, line_length, comments):
-        return self._output_vertical_grid_common(statement, imports, white_space, indent, line_length, comments) + "\n)"
-
-    def _output_noqa(self, statement, imports, white_space, indent, line_length, comments):
-        retval = '{0}{1}'.format(statement, ', '.join(imports))
-        comment_str = ' '.join(comments)
-        if comments:
-            if len(retval) + 4 + len(comment_str) <= line_length:
-                return '{0} # {1}'.format(retval, comment_str)
-        else:
-            if len(retval) <= line_length:
-                return retval
-        if comments:
-            if "NOQA" in comments:
-                return '{0} # {1}'.format(retval, comment_str)
-            else:
-                return '{0} # NOQA {1}'.format(retval, comment_str)
-        else:
-            return '{0} # NOQA'.format(retval)
-
-    @staticmethod
-    def _strip_comments(line, comments=None):
-        """Removes comments from import line."""
-        if comments is None:
-            comments = []
-
-        new_comments = False
-        comment_start = line.find("#")
-        if comment_start != -1:
-            comments.append(line[comment_start + 1:].strip())
-            new_comments = True
-            line = line[:comment_start]
-
-        return line, comments, new_comments
-
-    @staticmethod
-    def _format_simplified(import_line):
-        import_line = import_line.strip()
-        if import_line.startswith("from "):
-            import_line = import_line.replace("from ", "")
-            import_line = import_line.replace(" import ", ".")
-        elif import_line.startswith("import "):
-            import_line = import_line.replace("import ", "")
-
-        return import_line
-
-    @staticmethod
-    def _format_natural(import_line):
-        import_line = import_line.strip()
-        if not import_line.startswith("from ") and not import_line.startswith("import "):
-            if not "." in import_line:
-                return "import {0}".format(import_line)
-            parts = import_line.split(".")
-            end = parts.pop(-1)
-            return "from {0} import {1}".format(".".join(parts), end)
-
-        return import_line
-
-    def _skip_line(self, line):
-        skip_line = self._in_quote
-        if self.index == 1 and line.startswith("#"):
-            self._in_top_comment = True
-            return True
-        elif self._in_top_comment:
-            if not line.startswith("#"):
-                self._in_top_comment = False
-                self._first_comment_index_end = self.index - 1
-
-        if '"' in line or "'" in line:
-            index = 0
-            if self._first_comment_index_start == -1 and (line.startswith('"') or line.startswith("'")):
-                self._first_comment_index_start = self.index
-            while index < len(line):
-                if line[index] == "\\":
-                    index += 1
-                elif self._in_quote:
-                    if line[index:index + len(self._in_quote)] == self._in_quote:
-                        self._in_quote = False
-                        if self._first_comment_index_end < self._first_comment_index_start:
-                            self._first_comment_index_end = self.index
-                elif line[index] in ("'", '"'):
-                    long_quote = line[index:index + 3]
-                    if long_quote in ('"""', "'''"):
-                        self._in_quote = long_quote
-                        index += 2
-                    else:
-                        self._in_quote = line[index]
-                elif line[index] == "#":
-                    break
-                index += 1
-
-        return skip_line or self._in_quote or self._in_top_comment
-
-    def _strip_syntax(self, import_string):
-        import_string = import_string.replace("_import", "[[i]]")
-        for remove_syntax in ['\\', '(', ')', ',']:
-            import_string = import_string.replace(remove_syntax, " ")
-        import_list = import_string.split()
-        for key in ('from', 'import'):
-            if key in import_list:
-                import_list.remove(key)
-        import_string = ' '.join(import_list)
-        import_string = import_string.replace("[[i]]", "_import")
-        return import_string.replace("{ ", "{|").replace(" }", "|}")
-
-    def _parse(self):
-        """Parses a python file taking out and categorizing imports."""
-        self._in_quote = False
-        self._in_top_comment = False
-        while not self._at_end():
-            line = self._get_line()
-            statement_index = self.index
-            skip_line = self._skip_line(line)
-
-            if line in self._section_comments and not skip_line:
-                if self.import_index == -1:
-                    self.import_index = self.index - 1
-                continue
-
-            if "isort:imports-" in line and line.startswith("#"):
-                section = line.split("isort:imports-")[-1].split()[0].upper()
-                self.place_imports[section] = []
-                self.import_placements[line] = section
-
-            if ";" in line:
-                for part in (part.strip() for part in line.split(";")):
-                    if part and not part.startswith("from ") and not part.startswith("import "):
-                        skip_line = True
-
-            import_type = self._import_type(line)
-            if not import_type or skip_line:
-                self.out_lines.append(line)
-                continue
-
-            for line in (line.strip() for line in line.split(";")):
-                import_type = self._import_type(line)
-                if not import_type:
-                    self.out_lines.append(line)
-                    continue
-
-                line = line.replace("\t", " ").replace('import*', 'import *')
-                if self.import_index == -1:
-                    self.import_index = self.index - 1
-
-                nested_comments = {}
-                import_string, comments, new_comments = self._strip_comments(line)
-                stripped_line = [part for part in self._strip_syntax(import_string).strip().split(" ") if part]
-
-                if import_type == "from" and len(stripped_line) == 2 and stripped_line[1] != "*" and new_comments:
-                    nested_comments[stripped_line[-1]] = comments[0]
-
-                if "(" in line.split("#")[0] and not self._at_end():
-                    while not line.strip().endswith(")") and not self._at_end():
-                        line, comments, new_comments = self._strip_comments(self._get_line(), comments)
-                        stripped_line = self._strip_syntax(line).strip()
if import_type == "from" and stripped_line and not " " in stripped_line and new_comments: - nested_comments[stripped_line] = comments[-1] - import_string += "\n" + line - else: - while line.strip().endswith("\\"): - line, comments, new_comments = self._strip_comments(self._get_line(), comments) - stripped_line = self._strip_syntax(line).strip() - if import_type == "from" and stripped_line and not " " in stripped_line and new_comments: - nested_comments[stripped_line] = comments[-1] - if import_string.strip().endswith(" import") or line.strip().startswith("import "): - import_string += "\n" + line - else: - import_string = import_string.rstrip().rstrip("\\") + " " + line.lstrip() - - if import_type == "from": - import_string = import_string.replace("import(", "import (") - parts = import_string.split(" import ") - from_import = parts[0].split(" ") - import_string = " import ".join([from_import[0] + " " + "".join(from_import[1:])] + parts[1:]) - - imports = [item.replace("{|", "{ ").replace("|}", " }") for item in - self._strip_syntax(import_string).split()] - if "as" in imports and (imports.index('as') + 1) < len(imports): - while "as" in imports: - index = imports.index('as') - if import_type == "from": - module = imports[0] + "." + imports[index - 1] - self.as_map[module] = imports[index + 1] - else: - module = imports[index - 1] - self.as_map[module] = imports[index + 1] - if not self.config['combine_as_imports']: - self.comments['straight'][module] = comments - comments = [] - del imports[index:index + 2] - if import_type == "from": - import_from = imports.pop(0) - placed_module = self.place_module(import_from) - if placed_module == '': - print( - "WARNING: could not place module {0} of line {1} --" - " Do you need to define a default section?".format(import_from, line) - ) - root = self.imports[placed_module][import_type] - for import_name in imports: - associated_comment = nested_comments.get(import_name) - if associated_comment: - self.comments['nested'].setdefault(import_from, {})[import_name] = associated_comment - comments.pop(comments.index(associated_comment)) - if comments: - self.comments['from'].setdefault(import_from, []).extend(comments) - - if len(self.out_lines) > max(self.import_index, self._first_comment_index_end + 1, 1) - 1: - last = self.out_lines and self.out_lines[-1].rstrip() or "" - while (last.startswith("#") and not last.endswith('"""') and not last.endswith("'''") and not - 'isort:imports-' in last): - self.comments['above']['from'].setdefault(import_from, []).insert(0, self.out_lines.pop(-1)) - if len(self.out_lines) > max(self.import_index - 1, self._first_comment_index_end + 1, 1) - 1: - last = self.out_lines[-1].rstrip() - else: - last = "" - if statement_index - 1 == self.import_index: - self.import_index -= len(self.comments['above']['from'].get(import_from, [])) - - if root.get(import_from, False): - root[import_from].update(imports) - else: - root[import_from] = OrderedSet(imports) - else: - for module in imports: - if comments: - self.comments['straight'][module] = comments - comments = None - - if len(self.out_lines) > max(self.import_index, self._first_comment_index_end + 1, 1) - 1: - - last = self.out_lines and self.out_lines[-1].rstrip() or "" - while (last.startswith("#") and not last.endswith('"""') and not last.endswith("'''") and - not 'isort:imports-' in last): - self.comments['above']['straight'].setdefault(module, []).insert(0, - self.out_lines.pop(-1)) - if len(self.out_lines) > 0: - last = self.out_lines[-1].rstrip() - else: - last = "" - 
-                            if self.index - 1 == self.import_index:
-                                self.import_index -= len(self.comments['above']['straight'].get(module, []))
-                        placed_module = self.place_module(module)
-                        if placed_module == '':
-                            print(
-                                "WARNING: could not place module {0} of line {1} --"
-                                " Do you need to define a default section?".format(import_from, line)
-                            )
-                        self.imports[placed_module][import_type].add(module)
-
-
-def coding_check(fname, default='utf-8'):
-
-    # see https://www.python.org/dev/peps/pep-0263/
-    pattern = re.compile(br'coding[:=]\s*([-\w.]+)')
-
-    coding = default
-    with io.open(fname, 'rb') as f:
-        for line_number, line in enumerate(f, 1):
-            groups = re.findall(pattern, line)
-            if groups:
-                coding = groups[0].decode('ascii')
-                break
-            if line_number > 2:
-                break
-
-    return coding
-
-
-def get_stdlib_path():
-    """Returns the path to the standard lib for the current path installation.
-
-    This function can be dropped and "sysconfig.get_paths()" used directly once Python 2.6 support is dropped.
-    """
-    if sys.version_info >= (2, 7):
-        import sysconfig
-        return sysconfig.get_paths()['stdlib']
-    else:
-        return os.path.join(sys.prefix, 'lib')
-
-
-def exists_case_sensitive(path):
-    """
-    Returns if the given path exists and also matches the case on Windows.
-
-    When finding files that can be imported, it is important for the cases to match because while
-    file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python
-    can only import using the case of the real file.
-    """
-    result = os.path.exists(path)
-    if sys.platform.startswith('win') and result:
-        directory, basename = os.path.split(path)
-        result = basename in os.listdir(directory)
-    return result
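Per the docstring of the removed isort.py, the module's public entry point was the SortImports constructor; a minimal in-memory usage sketch (the sample source string is illustrative):

    from isort import SortImports

    messy = 'import sys\nimport os\n'
    # The sorted source is exposed on .output; passing check=True instead
    # only reports via .incorrectly_sorted, as the git hook above does.
    print(SortImports(file_contents=messy).output)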
diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/main.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/main.py
deleted file mode 100644
index eae7afa5..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/main.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#! /usr/bin/env python
-''' Tool for sorting imports alphabetically, and automatically separated into sections.
-
-Copyright (C) 2013 Timothy Edmund Crosley
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the Software without restriction, including without limitation
-the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
-to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or
-substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
-'''
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import argparse
-import glob
-import os
-import sys
-
-import setuptools
-
-from isort import SortImports, __version__
-from isort.settings import DEFAULT_SECTIONS, default, from_path, should_skip
-
-from .pie_slice import itemsview
-
-
-INTRO = r"""
-/#######################################################################\
-
-     `sMMy`
-     .yyyy-                                                      `
-    ##soos##                                                    ./o.
-          `     ``..-..`         ``...`.``         `   ```` ``-ssso```
- .s:-y-   .+osssssso/.     ./ossss+:so+:`    :+o-`/osso:+sssssssso/
- .s::y-   osss+.``.``     -ssss+-.`-ossso`   ssssso/::..::+ssss:::.
- .s::y-   /ssss+//:-.`   `ssss+     `ssss+   sssso`       :ssss`
- .s::y-   `-/+oossssso/  `ssss/      sssso   ssss/        :ssss`
- .y-/y-       ````:ssss`  ossso.    :ssss:   ssss/        :ssss.
- `/so:`    `-//::/osss+   `+ssss+-/ossso:    /sso-        `osssso/.
-   \/      `-/oooo++/-      .:/++:/++/-`      ..           `://++/.
-
-
-         isort your Python imports for you so you don't have to
-
-                            VERSION {0}
-
-\########################################################################/
-""".format(__version__)
-
-
-def iter_source_code(paths, config, skipped):
-    """Iterate over all Python source files defined in paths."""
-    for path in paths:
-        if os.path.isdir(path):
-            if should_skip(path, config, os.getcwd()):
-                skipped.append(path)
-                continue
-
-            for dirpath, dirnames, filenames in os.walk(path, topdown=True):
-                for dirname in list(dirnames):
-                    if should_skip(dirname, config, dirpath):
-                        skipped.append(dirname)
-                        dirnames.remove(dirname)
-                for filename in filenames:
-                    if filename.endswith('.py'):
-                        if should_skip(filename, config, dirpath):
-                            skipped.append(filename)
-                        else:
-                            yield os.path.join(dirpath, filename)
-        else:
-            yield path
-
-
-class ISortCommand(setuptools.Command):
-    """The :class:`ISortCommand` class is used by setuptools to perform
-    imports checks on registered modules.
-    """
-
-    description = "Run isort on modules registered in setuptools"
-    user_options = []
-
-    def initialize_options(self):
-        default_settings = default.copy()
-        for (key, value) in itemsview(default_settings):
-            setattr(self, key, value)
-
-    def finalize_options(self):
-        "Get options from config files."
-        self.arguments = {}
-        computed_settings = from_path(os.getcwd())
-        for (key, value) in itemsview(computed_settings):
-            self.arguments[key] = value
-
-    def distribution_files(self):
-        """Find distribution packages."""
-        # This is verbatim from flake8
-        if self.distribution.packages:
-            package_dirs = self.distribution.package_dir or {}
-            for package in self.distribution.packages:
-                pkg_dir = package
-                if package in package_dirs:
-                    pkg_dir = package_dirs[package]
-                elif '' in package_dirs:
-                    pkg_dir = package_dirs[''] + os.path.sep + pkg_dir
-                yield pkg_dir.replace('.', os.path.sep)
-
-        if self.distribution.py_modules:
-            for filename in self.distribution.py_modules:
-                yield "%s.py" % filename
-        # Don't miss the setup.py file itself
-        yield "setup.py"
-
-    def run(self):
-        arguments = self.arguments
-        wrong_sorted_files = False
-        arguments['check'] = True
-        for path in self.distribution_files():
-            for python_file in glob.iglob(os.path.join(path, '*.py')):
-                try:
-                    incorrectly_sorted = SortImports(python_file, **arguments).incorrectly_sorted
-                    if incorrectly_sorted:
-                        wrong_sorted_files = True
-                except IOError as e:
-                    print("WARNING: Unable to parse file {0} due to {1}".format(python_file, e))
-        if wrong_sorted_files:
-            exit(1)
-
-
-def create_parser():
-    parser = argparse.ArgumentParser(description='Sort Python import definitions alphabetically '
-                                                 'within logical sections.')
-    parser.add_argument('files', nargs='*', help='One or more Python source files that need their imports sorted.')
-    parser.add_argument('-y', '--apply', dest='apply', action='store_true',
-                        help='Tells isort to apply changes recursively without asking')
-    parser.add_argument('-l', '--lines', help='[Deprecated] The max length of an import line (used for wrapping '
-                        'long imports).',
-                        dest='line_length', type=int)
-    parser.add_argument('-w', '--line-width', help='The max length of an import line (used for wrapping long imports).',
-                        dest='line_length', type=int)
-    parser.add_argument('-s', '--skip', help='Files that sort imports should skip over. If you want to skip multiple '
-                        'files you should specify twice: --skip file1 --skip file2.', dest='skip', action='append')
-    parser.add_argument('-ns', '--dont-skip', help='Files that sort imports should never skip over.',
-                        dest='not_skip', action='append')
-    parser.add_argument('-sg', '--skip-glob', help='Files that sort imports should skip over.', dest='skip_glob',
-                        action='append')
-    parser.add_argument('-t', '--top', help='Force specific imports to the top of their appropriate section.',
-                        dest='force_to_top', action='append')
-    parser.add_argument('-f', '--future', dest='known_future_library', action='append',
-                        help='Force sortImports to recognize a module as part of the future compatibility libraries.')
-    parser.add_argument('-b', '--builtin', dest='known_standard_library', action='append',
-                        help='Force sortImports to recognize a module as part of the python standard library.')
-    parser.add_argument('-o', '--thirdparty', dest='known_third_party', action='append',
-                        help='Force sortImports to recognize a module as being part of a third party library.')
-    parser.add_argument('-p', '--project', dest='known_first_party', action='append',
-                        help='Force sortImports to recognize a module as being part of the current python project.')
-    parser.add_argument('--virtual-env', dest='virtual_env',
-                        help='Virtual environment to use for determining whether a package is third-party')
-    parser.add_argument('-m', '--multi-line', dest='multi_line_output', type=int, choices=[0, 1, 2, 3, 4, 5],
-                        help='Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, '
-                        '5-vert-grid-grouped).')
-    parser.add_argument('-i', '--indent', help='String to place for indents defaults to "    " (4 spaces).',
-                        dest='indent', type=str)
-    parser.add_argument('-a', '--add-import', dest='add_imports', action='append',
-                        help='Adds the specified import line to all files, '
-                             'automatically determining correct placement.')
-    parser.add_argument('-af', '--force-adds', dest='force_adds', action='store_true',
-                        help='Forces import adds even if the original file is empty.')
-    parser.add_argument('-r', '--remove-import', dest='remove_imports', action='append',
-                        help='Removes the specified import from all files.')
-    parser.add_argument('-ls', '--length-sort', help='Sort imports by their string length.',
-                        dest='length_sort', action='store_true')
-    parser.add_argument('-d', '--stdout', help='Force resulting output to stdout, instead of in-place.',
-                        dest='write_to_stdout', action='store_true')
-    parser.add_argument('-c', '--check-only', action='store_true', dest="check",
-                        help='Checks the file for unsorted / unformatted imports and prints them to the '
-                             'command line without modifying the file.')
-    parser.add_argument('-ws', '--ignore-whitespace', action='store_true', dest="ignore_whitespace",
-                        help='Tells isort to ignore whitespace differences when --check-only is being used.')
-    parser.add_argument('-sl', '--force-single-line-imports', dest='force_single_line', action='store_true',
-                        help='Forces all from imports to appear on their own line')
-    parser.add_argument('-ds', '--no-sections', help='Put all imports into the same section bucket', dest='no_sections',
-                        action='store_true')
-    parser.add_argument('-sd', '--section-default', dest='default_section',
-                        help='Sets the default section for imports (by default FIRSTPARTY) options: ' +
-                             str(DEFAULT_SECTIONS))
-    parser.add_argument('-df', '--diff', dest='show_diff', action='store_true',
-                        help="Prints a diff of all the changes isort would make to a file, instead of "
-                             "changing it in place")
-    parser.add_argument('-e', '--balanced', dest='balanced_wrapping', action='store_true',
-                        help='Balances wrapping to produce the most consistent line length possible')
-    parser.add_argument('-rc', '--recursive', dest='recursive', action='store_true',
-                        help='Recursively look for Python files of which to sort imports')
-    parser.add_argument('-ot', '--order-by-type', dest='order_by_type',
-                        action='store_true', help='Order imports by type in addition to alphabetically')
-    parser.add_argument('-dt', '--dont-order-by-type', dest='dont_order_by_type',
-                        action='store_true', help='Only order imports alphabetically, do not attempt type ordering')
-    parser.add_argument('-ac', '--atomic', dest='atomic', action='store_true',
-                        help="Ensures the output doesn't save if the resulting file contains syntax errors.")
-    parser.add_argument('-cs', '--combine-star', dest='combine_star', action='store_true',
-                        help="Ensures that if a star import is present, nothing else is imported from that namespace.")
-    parser.add_argument('-ca', '--combine-as', dest='combine_as_imports', action='store_true',
-                        help="Combines as imports on the same line.")
-    parser.add_argument('-tc', '--trailing-comma', dest='include_trailing_comma', action='store_true',
-                        help='Includes a trailing comma on multi line imports that include parentheses.')
-    parser.add_argument('-v', '--version', action='store_true', dest='show_version')
-    parser.add_argument('-vb', '--verbose', action='store_true', dest="verbose",
-                        help='Shows verbose output, such as when files are skipped or when a check is successful.')
-    parser.add_argument('-q', '--quiet', action='store_true', dest="quiet",
-                        help='Shows extra quiet output, only errors are outputted.')
-    parser.add_argument('-sp', '--settings-path', dest="settings_path",
-                        help='Explicitly set the settings path instead of auto determining based on file location.')
-    parser.add_argument('-ff', '--from-first', dest='from_first',
-                        help="Switches the typical ordering preference, showing from imports first then straight ones.")
-    parser.add_argument('-wl', '--wrap-length', dest='wrap_length',
-                        help="Specifies how long lines that are wrapped should be, if not set line_length is used.")
-    parser.add_argument('-fgw', '--force-grid-wrap', nargs='?', const=2, type=int, dest="force_grid_wrap",
-                        help='Force number of from imports (defaults to 2) to be grid wrapped regardless of line '
-                             'length')
-    parser.add_argument('-fass', '--force-alphabetical-sort-within-sections', action='store_true',
-                        dest="force_alphabetical_sort", help='Force all imports to be sorted alphabetically within a '
-                                                             'section')
-    parser.add_argument('-fas', '--force-alphabetical-sort', action='store_true', dest="force_alphabetical_sort",
-                        help='Force all imports to be sorted as a single section')
-    parser.add_argument('-fss', '--force-sort-within-sections', action='store_true', dest="force_sort_within_sections",
-                        help='Force imports to be sorted by module, independent of import_type')
-    parser.add_argument('-lbt', '--lines-between-types', dest='lines_between_types', type=int)
-    parser.add_argument('-up', '--use-parentheses', dest='use_parentheses', action='store_true',
-                        help='Use parenthesis for line continuation on lenght limit instead of slashes.')
-
-    arguments = dict((key, value) for (key, value) in itemsview(vars(parser.parse_args())) if value)
-    if 'dont_order_by_type' in arguments:
-        arguments['order_by_type'] = False
-    return arguments
-
-
-def main():
-    arguments = create_parser()
-    if arguments.get('show_version'):
-        print(INTRO)
-        return
-
-    if 'settings_path' in arguments:
-        sp = arguments['settings_path']
-        arguments['settings_path'] = os.path.abspath(sp) if os.path.isdir(sp) else os.path.dirname(os.path.abspath(sp))
-
-    file_names = arguments.pop('files', [])
-    if file_names == ['-']:
-        SortImports(file_contents=sys.stdin.read(), write_to_stdout=True, **arguments)
-    else:
-        if not file_names:
-            file_names = ['.']
-            arguments['recursive'] = True
-            if not arguments.get('apply', False):
-                arguments['ask_to_apply'] = True
-        config = from_path(os.path.abspath(file_names[0]) or os.getcwd()).copy()
-        config.update(arguments)
-        wrong_sorted_files = False
-        skipped = []
-        if arguments.get('recursive', False):
-            file_names = iter_source_code(file_names, config, skipped)
-        num_skipped = 0
-        if config['verbose'] or config.get('show_logo', False):
-            print(INTRO)
-        for file_name in file_names:
-            try:
-                sort_attempt = SortImports(file_name, **arguments)
-                incorrectly_sorted = sort_attempt.incorrectly_sorted
-                if arguments.get('check', False) and incorrectly_sorted:
-                    wrong_sorted_files = True
-                if sort_attempt.skipped:
-                    num_skipped += 1
-            except IOError as e:
-                print("WARNING: Unable to parse file {0} due to {1}".format(file_name, e))
-        if wrong_sorted_files:
-            exit(1)
-
-        num_skipped += len(skipped)
-        if num_skipped and not arguments.get('quiet', False):
-            if config['verbose']:
-                for was_skipped in skipped:
-                    print("WARNING: {0} was skipped as it's listed in 'skip' setting"
-                          " or matches a glob in 'skip_glob' setting".format(was_skipped))
-            print("Skipped {0} files".format(num_skipped))
-
-
-if __name__ == "__main__":
-    main()
- -""" -import re - - -def _atoi(text): - return int(text) if text.isdigit() else text - - -def _natural_keys(text): - return [_atoi(c) for c in re.split(r'(\d+)', text)] - - -def nsorted(to_sort, key=None): - """Returns a naturally sorted list""" - if key is None: - key_callback = _natural_keys - else: - def key_callback(item): - return _natural_keys(key(item)) - - return sorted(to_sort, key=key_callback) diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pie_slice.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pie_slice.py deleted file mode 100644 index 131f325a..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pie_slice.py +++ /dev/null @@ -1,594 +0,0 @@ -"""pie_slice/overrides.py. - -Overrides Python syntax to conform to the Python3 version as much as possible using a '*' import - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -from __future__ import absolute_import - -import abc -import collections -import functools -import sys -from numbers import Integral - -__version__ = "1.1.0" - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -VERSION = sys.version_info - -native_dict = dict -native_round = round -native_filter = filter -native_map = map -native_zip = zip -native_range = range -native_str = str -native_chr = chr -native_input = input -native_next = next -native_object = object - -common = ['native_dict', 'native_round', 'native_filter', 'native_map', 'native_range', 'native_str', 'native_chr', - 'native_input', 'PY2', 'PY3', 'u', 'itemsview', 'valuesview', 'keysview', 'execute', 'integer_types', - 'native_next', 'native_object', 'with_metaclass', 'OrderedDict', 'lru_cache'] - - -def with_metaclass(meta, *bases): - """Enables use of meta classes across Python Versions. taken from jinja2/_compat.py. 
- - Use it like this:: - - class BaseForm(object): - pass - - class FormType(type): - pass - - class Form(with_metaclass(FormType, BaseForm)): - pass - - """ - class metaclass(meta): - __call__ = type.__call__ - __init__ = type.__init__ - - def __new__(cls, name, this_bases, d): - if this_bases is None: - return type.__new__(cls, name, (), d) - return meta(name, bases, d) - return metaclass('temporary_class', None, {}) - - -def unmodified_isinstance(*bases): - """When called in the form - - MyOverrideClass(unmodified_isinstance(BuiltInClass)) - - it allows calls against passed in built in instances to pass even if there not a subclass - - """ - class UnmodifiedIsInstance(type): - if sys.version_info[0] == 2 and sys.version_info[1] <= 6: - - @classmethod - def __instancecheck__(cls, instance): - if cls.__name__ in (str(base.__name__) for base in bases): - return isinstance(instance, bases) - - subclass = getattr(instance, '__class__', None) - subtype = type(instance) - instance_type = getattr(abc, '_InstanceType', None) - if not instance_type: - class test_object: - pass - instance_type = type(test_object) - if subtype is instance_type: - subtype = subclass - if subtype is subclass or subclass is None: - return cls.__subclasscheck__(subtype) - return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)) - else: - @classmethod - def __instancecheck__(cls, instance): - if cls.__name__ in (str(base.__name__) for base in bases): - return isinstance(instance, bases) - - return type.__instancecheck__(cls, instance) - - return with_metaclass(UnmodifiedIsInstance, *bases) - - -if PY3: - import urllib - import builtins - from urllib import parse - - input = input - integer_types = (int, ) - - def u(string): - return string - - def itemsview(collection): - return collection.items() - - def valuesview(collection): - return collection.values() - - def keysview(collection): - return collection.keys() - - urllib.quote = parse.quote - urllib.quote_plus = parse.quote_plus - urllib.unquote = parse.unquote - urllib.unquote_plus = parse.unquote_plus - urllib.urlencode = parse.urlencode - execute = getattr(builtins, 'exec') - if VERSION[1] < 2: - def callable(entity): - return hasattr(entity, '__call__') - common.append('callable') - - __all__ = common + ['urllib'] -else: - from itertools import ifilter as filter - from itertools import imap as map - from itertools import izip as zip - from decimal import Decimal, ROUND_HALF_EVEN - - import codecs - str = unicode - chr = unichr - input = raw_input - range = xrange - integer_types = (int, long) - - import sys - stdout = sys.stdout - stderr = sys.stderr -# reload(sys) -# sys.stdout = stdout -# sys.stderr = stderr -# sys.setdefaultencoding('utf-8') - - def _create_not_allowed(name): - def _not_allow(*args, **kwargs): - raise NameError("name '{0}' is not defined".format(name)) - _not_allow.__name__ = name - return _not_allow - - for removed in ('apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks'): - globals()[removed] = _create_not_allowed(removed) - - def u(s): - if isinstance(s, unicode): - return s - else: - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - - def execute(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - class _dict_view_base(object): - __slots__ = ('_dictionary', ) 
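Since this patch drops Python 2, shims like with_metaclass above lose their purpose; Python 3 spells the same pattern natively with the metaclass keyword:

class FormType(type):
    pass

class BaseForm:
    pass

# Equivalent of with_metaclass(FormType, BaseForm) on Python 3:
class Form(BaseForm, metaclass=FormType):
    pass

assert type(Form) is FormType  # the class object itself is built by FormType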
- - def __init__(self, dictionary): - self._dictionary = dictionary - - def __repr__(self): - return "{0}({1})".format(self.__class__.__name__, str(list(self.__iter__()))) - - def __unicode__(self): - return str(self.__repr__()) - - def __str__(self): - return str(self.__unicode__()) - - class dict_keys(_dict_view_base): - __slots__ = () - - def __iter__(self): - return self._dictionary.iterkeys() - - class dict_values(_dict_view_base): - __slots__ = () - - def __iter__(self): - return self._dictionary.itervalues() - - class dict_items(_dict_view_base): - __slots__ = () - - def __iter__(self): - return self._dictionary.iteritems() - - def itemsview(collection): - return dict_items(collection) - - def valuesview(collection): - return dict_values(collection) - - def keysview(collection): - return dict_keys(collection) - - class dict(unmodified_isinstance(native_dict)): - def has_key(self, *args, **kwargs): - return AttributeError("'dict' object has no attribute 'has_key'") - - def items(self): - return dict_items(self) - - def keys(self): - return dict_keys(self) - - def values(self): - return dict_values(self) - - def round(number, ndigits=None): - return_int = False - if ndigits is None: - return_int = True - ndigits = 0 - if hasattr(number, '__round__'): - return number.__round__(ndigits) - - if ndigits < 0: - raise NotImplementedError('negative ndigits not supported yet') - exponent = Decimal('10') ** (-ndigits) - d = Decimal.from_float(number).quantize(exponent, - rounding=ROUND_HALF_EVEN) - if return_int: - return int(d) - else: - return float(d) - - def next(iterator): - try: - iterator.__next__() - except Exception: - native_next(iterator) - - class FixStr(type): - def __new__(cls, name, bases, dct): - if '__str__' in dct: - dct['__unicode__'] = dct['__str__'] - dct['__str__'] = lambda self: self.__unicode__().encode('utf-8') - return type.__new__(cls, name, bases, dct) - - if sys.version_info[1] <= 6: - def __instancecheck__(cls, instance): - if cls.__name__ == "object": - return isinstance(instance, native_object) - - subclass = getattr(instance, '__class__', None) - subtype = type(instance) - instance_type = getattr(abc, '_InstanceType', None) - if not instance_type: - class test_object: - pass - instance_type = type(test_object) - if subtype is instance_type: - subtype = subclass - if subtype is subclass or subclass is None: - return cls.__subclasscheck__(subtype) - return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)) - else: - def __instancecheck__(cls, instance): - if cls.__name__ == "object": - return isinstance(instance, native_object) - return type.__instancecheck__(cls, instance) - - class object(with_metaclass(FixStr, object)): - pass - - __all__ = common + ['round', 'dict', 'apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks', 'str', 'chr', - 'input', 'range', 'filter', 'map', 'zip', 'object'] - -if sys.version_info[0] == 2 and sys.version_info[1] < 7: - # OrderedDict - # Copyright (c) 2009 Raymond Hettinger - # - # Permission is hereby granted, free of charge, to any person - # obtaining a copy of this software and associated documentation files - # (the "Software"), to deal in the Software without restriction, - # including without limitation the rights to use, copy, modify, merge, - # publish, distribute, sublicense, and/or sell copies of the Software, - # and to permit persons to whom the Software is furnished to do so, - # subject to the following conditions: - # - # The above copyright notice and this permission notice shall be - # 
included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - # OTHER DEALINGS IN THE SOFTWARE. - - from UserDict import DictMixin - - class OrderedDict(dict, DictMixin): - - def __init__(self, *args, **kwds): - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = reversed(self).next() - else: - key = iter(self).next() - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other -else: - from collections import OrderedDict - - -if sys.version_info < (3, 2): - try: - from threading import Lock - except ImportError: - from dummy_threading import Lock - - from functools import wraps - - def lru_cache(maxsize=100): - """Least-recently-used cache decorator. - Taking from: https://github.com/MiCHiLU/python-functools32/blob/master/functools32/functools32.py - with slight modifications. - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - Arguments to the cached function must be hashable. 
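The docstring here describes the same contract as functools.lru_cache, which the else branch below imports on modern interpreters. Its stdlib form in use:

from functools import lru_cache

@lru_cache(maxsize=100)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)
print(fib.cache_info())  # CacheInfo(hits=28, misses=31, maxsize=100, currsize=31)
fib.cache_clear()        # resets both the cache and the statistics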
- View the cache statistics named tuple (hits, misses, maxsize, currsize) with - f.cache_info(). Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used - - """ - def decorating_function(user_function, tuple=tuple, sorted=sorted, len=len, KeyError=KeyError): - hits, misses = [0], [0] - kwd_mark = (object(),) # separates positional and keyword args - lock = Lock() - - if maxsize is None: - CACHE = dict() - - @wraps(user_function) - def wrapper(*args, **kwds): - key = args - if kwds: - key += kwd_mark + tuple(sorted(kwds.items())) - try: - result = CACHE[key] - hits[0] += 1 - return result - except KeyError: - pass - result = user_function(*args, **kwds) - CACHE[key] = result - misses[0] += 1 - return result - else: - CACHE = OrderedDict() - - @wraps(user_function) - def wrapper(*args, **kwds): - key = args - if kwds: - key += kwd_mark + tuple(sorted(kwds.items())) - with lock: - cached = CACHE.get(key, None) - if cached: - del CACHE[key] - CACHE[key] = cached - hits[0] += 1 - return cached - result = user_function(*args, **kwds) - with lock: - CACHE[key] = result # record recent use of this key - misses[0] += 1 - while len(CACHE) > maxsize: - CACHE.popitem(last=False) - return result - - def cache_info(): - """Report CACHE statistics.""" - with lock: - return _CacheInfo(hits[0], misses[0], maxsize, len(CACHE)) - - def cache_clear(): - """Clear the CACHE and CACHE statistics.""" - with lock: - CACHE.clear() - hits[0] = misses[0] = 0 - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return wrapper - - return decorating_function - -else: - from functools import lru_cache - - -class OrderedSet(collections.MutableSet): - - def __init__(self, iterable=None): - self.end = end = [] - end += [None, end, end] - self.map = {} - if iterable is not None: - self |= iterable - - def __len__(self): - return len(self.map) - - def __contains__(self, key): - return key in self.map - - def add(self, key): - if key not in self.map: - end = self.end - curr = end[1] - curr[2] = end[1] = self.map[key] = [key, curr, end] - - def discard(self, key): - if key in self.map: - key, prev, next = self.map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def pop(self, last=True): - if not self: - raise KeyError('set is empty') - key = self.end[1][0] if last else self.end[2][0] - self.discard(key) - return key - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self)) - - def __eq__(self, other): - if isinstance(other, OrderedSet): - return len(self) == len(other) and list(self) == list(other) - return set(self) == set(other) - - def update(self, other): - for item in other: - self.add(item) diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pylama_isort.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pylama_isort.py deleted file mode 100644 index 6fa235f9..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/pylama_isort.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import sys - -from pylama.lint import Linter as BaseLinter - -from .isort import SortImports - - -class Linter(BaseLinter): - - def 
allow(self, path): - """Determine if this path should be linted.""" - return path.endswith('.py') - - def run(self, path, **meta): - """Lint the file. Return an array of error dicts if appropriate.""" - with open(os.devnull, 'w') as devnull: - # Suppress isort messages - sys.stdout = devnull - - if SortImports(path, check=True).incorrectly_sorted: - return [{ - 'lnum': 0, - 'col': 0, - 'text': 'Incorrectly sorted imports.', - 'type': 'ISORT' - }] - else: - return [] diff --git a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/settings.py b/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/settings.py deleted file mode 100644 index 15cdb210..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/isort_container/isort/settings.py +++ /dev/null @@ -1,256 +0,0 @@ -"""isort/settings.py. - -Defines how the default settings for isort should be loaded - -(First from the default setting dictionary at the top of the file, then overridden by any settings - in ~/.isort.cfg if there are any) - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import fnmatch -import os -import posixpath -from collections import namedtuple - -from .pie_slice import itemsview, lru_cache, native_str - -try: - import configparser -except ImportError: - import ConfigParser as configparser - -MAX_CONFIG_SEARCH_DEPTH = 25 # The number of parent directories isort will look for a config file within -DEFAULT_SECTIONS = ('FUTURE', 'STDLIB', 'THIRDPARTY', 'FIRSTPARTY', 'LOCALFOLDER') - -WrapModes = ('GRID', 'VERTICAL', 'HANGING_INDENT', 'VERTICAL_HANGING_INDENT', 'VERTICAL_GRID', 'VERTICAL_GRID_GROUPED', 'NOQA') -WrapModes = namedtuple('WrapModes', WrapModes)(*range(len(WrapModes))) - -# Note that none of these lists must be complete as they are simply fallbacks for when included auto-detection fails. 
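Before the defaults below, one usage note: the isort 4.x entry point seen throughout these files is a single SortImports call. Check mode, as the deleted pylama plugin above drives it, looks like this (the file path is hypothetical):

from isort import SortImports  # isort 4.x API; isort 5 replaced it

# Check mode reports rather than rewrites; .incorrectly_sorted mirrors
# what the pylama plugin inspected above.
result = SortImports('example_module.py', check=True)  # hypothetical path
if result.incorrectly_sorted:
    print('imports need sorting')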
-default = {'force_to_top': [], - 'skip': ['__init__.py', ], - 'skip_glob': [], - 'line_length': 79, - 'wrap_length': 0, - 'sections': DEFAULT_SECTIONS, - 'no_sections': False, - 'known_future_library': ['__future__'], - 'known_standard_library': ['AL', 'BaseHTTPServer', 'Bastion', 'CGIHTTPServer', 'Carbon', 'ColorPicker', - 'ConfigParser', 'Cookie', 'DEVICE', 'DocXMLRPCServer', 'EasyDialogs', 'FL', - 'FrameWork', 'GL', 'HTMLParser', 'MacOS', 'MimeWriter', 'MiniAEFrame', 'Nav', - 'PixMapWrapper', 'Queue', 'SUNAUDIODEV', 'ScrolledText', 'SimpleHTTPServer', - 'SimpleXMLRPCServer', 'SocketServer', 'StringIO', 'Tix', 'Tkinter', 'UserDict', - 'UserList', 'UserString', 'W', '__builtin__', 'abc', 'aepack', 'aetools', - 'aetypes', 'aifc', 'al', 'anydbm', 'applesingle', 'argparse', 'array', 'ast', - 'asynchat', 'asyncio', 'asyncore', 'atexit', 'audioop', 'autoGIL', 'base64', - 'bdb', 'binascii', 'binhex', 'bisect', 'bsddb', 'buildtools', 'builtins', - 'bz2', 'cPickle', 'cProfile', 'cStringIO', 'calendar', 'cd', 'cfmfile', 'cgi', - 'cgitb', 'chunk', 'cmath', 'cmd', 'code', 'codecs', 'codeop', 'collections', - 'colorsys', 'commands', 'compileall', 'compiler', 'concurrent', 'configparser', - 'contextlib', 'cookielib', 'copy', 'copy_reg', 'copyreg', 'crypt', 'csv', - 'ctypes', 'curses', 'datetime', 'dbhash', 'dbm', 'decimal', 'difflib', - 'dircache', 'dis', 'distutils', 'dl', 'doctest', 'dumbdbm', 'dummy_thread', - 'dummy_threading', 'email', 'encodings', 'ensurepip', 'enum', 'errno', - 'exceptions', 'faulthandler', 'fcntl', 'filecmp', 'fileinput', 'findertools', - 'fl', 'flp', 'fm', 'fnmatch', 'formatter', 'fpectl', 'fpformat', 'fractions', - 'ftplib', 'functools', 'future_builtins', 'gc', 'gdbm', 'gensuitemodule', - 'getopt', 'getpass', 'gettext', 'gl', 'glob', 'grp', 'gzip', 'hashlib', - 'heapq', 'hmac', 'hotshot', 'html', 'htmlentitydefs', 'htmllib', 'http', - 'httplib', 'ic', 'icopen', 'imageop', 'imaplib', 'imgfile', 'imghdr', 'imp', - 'importlib', 'imputil', 'inspect', 'io', 'ipaddress', 'itertools', 'jpeg', - 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma', - 'macerrors', 'macostools', 'macpath', 'macresource', 'mailbox', 'mailcap', - 'marshal', 'math', 'md5', 'mhlib', 'mimetools', 'mimetypes', 'mimify', 'mmap', - 'modulefinder', 'msilib', 'msvcrt', 'multifile', 'multiprocessing', 'mutex', - 'netrc', 'new', 'nis', 'nntplib', 'numbers', 'operator', 'optparse', 'os', - 'ossaudiodev', 'parser', 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', - 'pkgutil', 'platform', 'plistlib', 'popen2', 'poplib', 'posix', 'posixfile', - 'pprint', 'profile', 'pstats', 'pty', 'pwd', 'py_compile', 'pyclbr', 'pydoc', - 'queue', 'quopri', 'random', 're', 'readline', 'reprlib', 'resource', 'rexec', - 'rfc822', 'rlcompleter', 'robotparser', 'runpy', 'sched', 'secrets', 'select', - 'selectors', 'sets', 'sgmllib', 'sha', 'shelve', 'shlex', 'shutil', 'signal', - 'site', 'sitecustomize', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', - 'spwd', 'sqlite3', 'ssl', 'stat', 'statistics', 'statvfs', 'string', 'stringprep', - 'struct', 'subprocess', 'sunau', 'sunaudiodev', 'symbol', 'symtable', 'sys', - 'sysconfig', 'syslog', 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'termios', - 'test', 'textwrap', 'this', 'thread', 'threading', 'time', 'timeit', 'tkinter', - 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'ttk', 'tty', 'turtle', - 'turtledemo', 'types', 'typing', 'unicodedata', 'unittest', 'urllib', 'urllib2', - 'urlparse', 'user', 'usercustomize', 'uu', 'uuid', 'venv', 
'videoreader', - 'warnings', 'wave', 'weakref', 'webbrowser', 'whichdb', 'winreg', 'winsound', - 'wsgiref', 'xdrlib', 'xml', 'xmlrpc', 'xmlrpclib', 'zipapp', 'zipfile', - 'zipimport', 'zlib'], - 'known_third_party': ['google.appengine.api'], - 'known_first_party': [], - 'multi_line_output': WrapModes.GRID, - 'forced_separate': [], - 'indent': ' ' * 4, - 'length_sort': False, - 'add_imports': [], - 'remove_imports': [], - 'force_single_line': False, - 'default_section': 'FIRSTPARTY', - 'import_heading_future': '', - 'import_heading_stdlib': '', - 'import_heading_thirdparty': '', - 'import_heading_firstparty': '', - 'import_heading_localfolder': '', - 'balanced_wrapping': False, - 'use_parentheses': False, - 'order_by_type': True, - 'atomic': False, - 'lines_after_imports': -1, - 'lines_between_sections': 1, - 'lines_between_types': 0, - 'combine_as_imports': False, - 'combine_star': False, - 'include_trailing_comma': False, - 'from_first': False, - 'verbose': False, - 'quiet': False, - 'force_adds': False, - 'force_alphabetical_sort_within_sections': False, - 'force_alphabetical_sort': False, - 'force_grid_wrap': 0, - 'force_sort_within_sections': False, - 'show_diff': False, - 'ignore_whitespace': False} - - -@lru_cache() -def from_path(path): - computed_settings = default.copy() - _update_settings_with_config(path, '.editorconfig', '~/.editorconfig', ('*', '*.py', '**.py'), computed_settings) - _update_settings_with_config(path, '.isort.cfg', '~/.isort.cfg', ('settings', 'isort'), computed_settings) - _update_settings_with_config(path, 'setup.cfg', None, ('isort', ), computed_settings) - _update_settings_with_config(path, 'tox.ini', None, ('isort', ), computed_settings) - return computed_settings - - -def _update_settings_with_config(path, name, default, sections, computed_settings): - editor_config_file = default and os.path.expanduser(default) - tries = 0 - current_directory = path - while current_directory and tries < MAX_CONFIG_SEARCH_DEPTH: - potential_path = os.path.join(current_directory, native_str(name)) - if os.path.exists(potential_path): - editor_config_file = potential_path - break - - new_directory = os.path.split(current_directory)[0] - if current_directory == new_directory: - break - current_directory = new_directory - tries += 1 - - if editor_config_file and os.path.exists(editor_config_file): - _update_with_config_file(editor_config_file, sections, computed_settings) - - -def _update_with_config_file(file_path, sections, computed_settings): - settings = _get_config_data(file_path, sections).copy() - if not settings: - return - - if file_path.endswith('.editorconfig'): - indent_style = settings.pop('indent_style', '').strip() - indent_size = settings.pop('indent_size', '').strip() - if indent_style == 'space': - computed_settings['indent'] = ' ' * (indent_size and int(indent_size) or 4) - elif indent_style == 'tab': - computed_settings['indent'] = '\t' * (indent_size and int(indent_size) or 1) - - max_line_length = settings.pop('max_line_length', '').strip() - if max_line_length: - computed_settings['line_length'] = float('inf') if max_line_length == 'off' else int(max_line_length) - - for key, value in itemsview(settings): - access_key = key.replace('not_', '').lower() - existing_value_type = type(default.get(access_key, '')) - if existing_value_type in (list, tuple): - # sections has fixed order values; no adding or substraction from any set - if access_key == 'sections': - computed_settings[access_key] = tuple(_as_list(value)) - else: - existing_data = 
set(computed_settings.get(access_key, default.get(access_key))) - if key.startswith('not_'): - computed_settings[access_key] = list(existing_data.difference(_as_list(value))) - else: - computed_settings[access_key] = list(existing_data.union(_as_list(value))) - elif existing_value_type == bool and value.lower().strip() == 'false': - computed_settings[access_key] = False - elif key.startswith('known_'): - computed_settings[access_key] = list(_as_list(value)) - elif key == 'force_grid_wrap': - try: - result = existing_value_type(value) - except ValueError: - # backwards compat - result = default.get(access_key) if value.lower().strip() == 'false' else 2 - computed_settings[access_key] = result - else: - computed_settings[access_key] = existing_value_type(value) - - -def _as_list(value): - return filter(bool, [item.strip() for item in value.replace('\n', ',').split(',')]) - - -@lru_cache() -def _get_config_data(file_path, sections): - with open(file_path, 'rU') as config_file: - if file_path.endswith('.editorconfig'): - line = '\n' - last_position = config_file.tell() - while line: - line = config_file.readline() - if '[' in line: - config_file.seek(last_position) - break - last_position = config_file.tell() - - config = configparser.SafeConfigParser() - config.readfp(config_file) - settings = dict() - for section in sections: - if config.has_section(section): - settings.update(dict(config.items(section))) - - return settings - - return {} - - -def should_skip(filename, config, path='/'): - """Returns True if the file should be skipped based on the passed in settings.""" - for skip_path in config['skip']: - if posixpath.abspath(posixpath.join(path, filename)) == posixpath.abspath(skip_path.replace('\\', '/')): - return True - - position = os.path.split(filename) - while position[1]: - if position[1] in config['skip']: - return True - position = os.path.split(position[0]) - - for glob in config['skip_glob']: - if fnmatch.fnmatch(filename, glob): - return True - - return False diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py b/src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py deleted file mode 100644 index 7b66d307..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py +++ /dev/null @@ -1,3827 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2010-2011 Hideo Hattori -# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint -# Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Automatically formats Python code to conform to the PEP 8 style guide. - -Fixes that only need be done once can be added by adding a function of the form -"fix_(source)" to this module. They should return the fixed source code. -These fixes are picked up by apply_global_fixes(). - -Fixes that depend on pycodestyle should be added as methods to FixPEP8. See the -class documentation for more information. - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import codecs -import collections -import copy -import difflib -import fnmatch -import inspect -import io -import keyword -import locale -import os -import re -import signal -import sys -import textwrap -import token -import tokenize - -import pycodestyle - -def check_lib2to3(): - try: - import lib2to3 - except ImportError: - sys.path.append(os.path.join(os.path.dirname(__file__), 'lib2to3')) - import lib2to3 - - - -try: - unicode -except NameError: - unicode = str - - -__version__ = '1.3' - - -CR = '\r' -LF = '\n' -CRLF = '\r\n' - - -PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$') -LAMBDA_REGEX = re.compile(r'([\w.]+)\s=\slambda\s*([\(\)\w,\s.]*):') -COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+([^][)(}{]+)\s+(in|is)\s') -BARE_EXCEPT_REGEX = re.compile(r'except\s*:') -STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\s.*\):') - - -# For generating line shortening candidates. -SHORTEN_OPERATOR_GROUPS = frozenset([ - frozenset([',']), - frozenset(['%']), - frozenset([',', '(', '[', '{']), - frozenset(['%', '(', '[', '{']), - frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']), - frozenset(['%', '+', '-', '*', '/', '//']), -]) - - -DEFAULT_IGNORE = 'E24,W503' -DEFAULT_INDENT_SIZE = 4 - - -# W602 is handled separately due to the need to avoid "with_traceback". 
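The CODE_TO_2TO3 table just below maps pycodestyle codes to lib2to3 fixer names (so W601 is repaired by the has_key fixer). Driving one such fixer directly is short; note that lib2to3 ships with CPython only through 3.12 and is deprecated since 3.9:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(['lib2to3.fixes.fix_has_key'])
fixed = tool.refactor_string("d = {}\nprint(d.has_key('k'))\n", '<example>')
print(fixed)  # d.has_key('k') becomes 'k' in d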
-CODE_TO_2TO3 = { - 'E231': ['ws_comma'], - 'E721': ['idioms'], - 'W601': ['has_key'], - 'W603': ['ne'], - 'W604': ['repr'], - 'W690': ['apply', - 'except', - 'exitfunc', - 'numliterals', - 'operator', - 'paren', - 'reduce', - 'renames', - 'standarderror', - 'sys_exc', - 'throw', - 'tuple_params', - 'xreadlines']} - - -if sys.platform == 'win32': # pragma: no cover - DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') -else: - DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or - os.path.expanduser('~/.config'), 'pep8') -PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') - - -MAX_PYTHON_FILE_DETECTION_BYTES = 1024 - - -def open_with_encoding(filename, - encoding=None, mode='r', limit_byte_check=-1): - """Return opened file with a specific encoding.""" - if not encoding: - encoding = detect_encoding(filename, limit_byte_check=limit_byte_check) - - return io.open(filename, mode=mode, encoding=encoding, - newline='') # Preserve line endings - - -def detect_encoding(filename, limit_byte_check=-1): - """Return file encoding.""" - try: - with open(filename, 'rb') as input_file: - from lib2to3.pgen2 import tokenize as lib2to3_tokenize - encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0] - - with open_with_encoding(filename, encoding) as test_file: - test_file.read(limit_byte_check) - - return encoding - except (LookupError, SyntaxError, UnicodeDecodeError): - return 'latin-1' - - -def readlines_from_file(filename): - """Return contents of file.""" - with open_with_encoding(filename) as input_file: - return input_file.readlines() - - -def extended_blank_lines(logical_line, - blank_lines, - blank_before, - indent_level, - previous_logical): - """Check for missing blank lines after class declaration.""" - if previous_logical.startswith('def '): - if blank_lines and pycodestyle.DOCSTRING_REGEX.match(logical_line): - yield (0, 'E303 too many blank lines ({0})'.format(blank_lines)) - elif pycodestyle.DOCSTRING_REGEX.match(previous_logical): - # Missing blank line between class docstring and method declaration. - if ( - indent_level and - not blank_lines and - not blank_before and - logical_line.startswith(('def ')) and - '(self' in logical_line - ): - yield (0, 'E301 expected 1 blank line, found 0') - - -pycodestyle.register_check(extended_blank_lines) - - -def continued_indentation(logical_line, tokens, indent_level, indent_char, - noqa): - """Override pycodestyle's function to provide indentation information.""" - first_row = tokens[0][2][0] - nrows = 1 + tokens[-1][2][0] - first_row - if noqa or nrows == 1: - return - - # indent_next tells us whether the next block is indented. Assuming - # that it is indented by 4 spaces, then we should not allow 4-space - # indents on the final continuation line. In turn, some other - # indents are allowed to have an extra 4 spaces. - indent_next = logical_line.endswith(':') - - row = depth = 0 - valid_hangs = ( - (DEFAULT_INDENT_SIZE,) - if indent_char != '\t' else (DEFAULT_INDENT_SIZE, - 2 * DEFAULT_INDENT_SIZE) - ) - - # Remember how many brackets were opened on each line. - parens = [0] * nrows - - # Relative indents of physical lines. - rel_indent = [0] * nrows - - # For each depth, collect a list of opening rows. - open_rows = [[0]] - # For each depth, memorize the hanging indentation. - hangs = [None] - - # Visual indents. 
- indent_chances = {} - last_indent = tokens[0][2] - indent = [last_indent[1]] - - last_token_multiline = None - line = None - last_line = '' - last_line_begins_with_multiline = False - for token_type, text, start, end, line in tokens: - - newline = row < start[0] - first_row - if newline: - row = start[0] - first_row - newline = (not last_token_multiline and - token_type not in (tokenize.NL, tokenize.NEWLINE)) - last_line_begins_with_multiline = last_token_multiline - - if newline: - # This is the beginning of a continuation line. - last_indent = start - - # Record the initial indent. - rel_indent[row] = pycodestyle.expand_indent(line) - indent_level - - # Identify closing bracket. - close_bracket = (token_type == tokenize.OP and text in ']})') - - # Is the indent relative to an opening bracket line? - for open_row in reversed(open_rows[depth]): - hang = rel_indent[row] - rel_indent[open_row] - hanging_indent = hang in valid_hangs - if hanging_indent: - break - if hangs[depth]: - hanging_indent = (hang == hangs[depth]) - - visual_indent = (not close_bracket and hang > 0 and - indent_chances.get(start[1])) - - if close_bracket and indent[depth]: - # Closing bracket for visual indent. - if start[1] != indent[depth]: - yield (start, 'E124 {0}'.format(indent[depth])) - elif close_bracket and not hang: - pass - elif indent[depth] and start[1] < indent[depth]: - # Visual indent is broken. - yield (start, 'E128 {0}'.format(indent[depth])) - elif (hanging_indent or - (indent_next and - rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)): - # Hanging indent is verified. - if close_bracket: - yield (start, 'E123 {0}'.format(indent_level + - rel_indent[open_row])) - hangs[depth] = hang - elif visual_indent is True: - # Visual indent is verified. - indent[depth] = start[1] - elif visual_indent in (text, unicode): - # Ignore token lined up with matching one from a previous line. - pass - else: - one_indented = (indent_level + rel_indent[open_row] + - DEFAULT_INDENT_SIZE) - # Indent is broken. - if hang <= 0: - error = ('E122', one_indented) - elif indent[depth]: - error = ('E127', indent[depth]) - elif not close_bracket and hangs[depth]: - error = ('E131', one_indented) - elif hang > DEFAULT_INDENT_SIZE: - error = ('E126', one_indented) - else: - hangs[depth] = hang - error = ('E121', one_indented) - - yield (start, '{0} {1}'.format(*error)) - - # Look for visual indenting. - if ( - parens[row] and - token_type not in (tokenize.NL, tokenize.COMMENT) and - not indent[depth] - ): - indent[depth] = start[1] - indent_chances[start[1]] = True - # Deal with implicit string concatenation. - elif (token_type in (tokenize.STRING, tokenize.COMMENT) or - text in ('u', 'ur', 'b', 'br')): - indent_chances[start[1]] = unicode - # Special case for the "if" statement because len("if (") is equal to - # 4. - elif not indent_chances and not row and not depth and text == 'if': - indent_chances[end[1] + 1] = True - elif text == ':' and line[end[1]:].isspace(): - open_rows[depth].append(row) - - # Keep track of bracket depth. - if token_type == tokenize.OP: - if text in '([{': - depth += 1 - indent.append(0) - hangs.append(None) - if len(open_rows) == depth: - open_rows.append([]) - open_rows[depth].append(row) - parens[row] += 1 - elif text in ')]}' and depth > 0: - # Parent indents should not be more than this one. 
- prev_indent = indent.pop() or last_indent[1] - hangs.pop() - for d in range(depth): - if indent[d] > prev_indent: - indent[d] = 0 - for ind in list(indent_chances): - if ind >= prev_indent: - del indent_chances[ind] - del open_rows[depth + 1:] - depth -= 1 - if depth: - indent_chances[indent[depth]] = True - for idx in range(row, -1, -1): - if parens[idx]: - parens[idx] -= 1 - break - assert len(indent) == depth + 1 - if ( - start[1] not in indent_chances and - # This is for purposes of speeding up E121 (GitHub #90). - not last_line.rstrip().endswith(',') - ): - # Allow to line up tokens. - indent_chances[start[1]] = text - - last_token_multiline = (start[0] != end[0]) - if last_token_multiline: - rel_indent[end[0] - first_row] = rel_indent[row] - - last_line = line - - if ( - indent_next and - not last_line_begins_with_multiline and - pycodestyle.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE - ): - pos = (start[0], indent[0] + 4) - desired_indent = indent_level + 2 * DEFAULT_INDENT_SIZE - if visual_indent: - yield (pos, 'E129 {0}'.format(desired_indent)) - else: - yield (pos, 'E125 {0}'.format(desired_indent)) - - -del pycodestyle._checks['logical_line'][pycodestyle.continued_indentation] -pycodestyle.register_check(continued_indentation) - - -class FixPEP8(object): - - """Fix invalid code. - - Fixer methods are prefixed "fix_". The _fix_source() method looks for these - automatically. - - The fixer method can take either one or two arguments (in addition to - self). The first argument is "result", which is the error information from - pycodestyle. The second argument, "logical", is required only for - logical-line fixes. - - The fixer method can return the list of modified lines or None. An empty - list would mean that no changes were made. None would mean that only the - line reported in the pycodestyle error was modified. Note that the modified - line numbers that are returned are indexed at 1. This typically would - correspond with the line number reported in the pycodestyle error - information. - - [fixed method list] - - e111,e114,e115,e116 - - e121,e122,e123,e124,e125,e126,e127,e128,e129 - - e201,e202,e203 - - e211 - - e221,e222,e223,e224,e225 - - e231 - - e251 - - e261,e262 - - e271,e272,e273,e274 - - e301,e302,e303,e304,e306 - - e401 - - e502 - - e701,e702,e703,e704 - - e711,e712,e713,e714 - - e722 - - e731 - - w291 - - w503 - - """ - - def __init__(self, filename, - options, - contents=None, - long_line_ignore_cache=None): - self.filename = filename - if contents is None: - self.source = readlines_from_file(filename) - else: - sio = io.StringIO(contents) - self.source = sio.readlines() - self.options = options - self.indent_word = _get_indentword(''.join(self.source)) - - self.long_line_ignore_cache = ( - set() if long_line_ignore_cache is None - else long_line_ignore_cache) - - # Many fixers are the same even though pycodestyle categorizes them - # differently. 
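The alias block that follows is one half of FixPEP8's design: fixers are looked up by name, so related pycodestyle codes can share a repair through plain attribute aliasing. A stripped-down sketch of that dispatch (a hypothetical standalone class, not the vendored one):

class MiniFixer:
    def fix_e201(self, result):
        # One repair can serve several related codes.
        return ['E201-style whitespace fix at line %d' % result['line']]

    fix_e202 = fix_e201  # alias, exactly like the assignments below

    def dispatch(self, result):
        # FixPEP8 resolves fixers as 'fix_' + lowercased error code.
        method = getattr(self, 'fix_' + result['id'].lower(), None)
        return method(result) if method is not None else None

print(MiniFixer().dispatch({'id': 'E202', 'line': 3}))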
- self.fix_e115 = self.fix_e112 - self.fix_e116 = self.fix_e113 - self.fix_e121 = self._fix_reindent - self.fix_e122 = self._fix_reindent - self.fix_e123 = self._fix_reindent - self.fix_e124 = self._fix_reindent - self.fix_e126 = self._fix_reindent - self.fix_e127 = self._fix_reindent - self.fix_e128 = self._fix_reindent - self.fix_e129 = self._fix_reindent - self.fix_e202 = self.fix_e201 - self.fix_e203 = self.fix_e201 - self.fix_e211 = self.fix_e201 - self.fix_e221 = self.fix_e271 - self.fix_e222 = self.fix_e271 - self.fix_e223 = self.fix_e271 - self.fix_e226 = self.fix_e225 - self.fix_e227 = self.fix_e225 - self.fix_e228 = self.fix_e225 - self.fix_e241 = self.fix_e271 - self.fix_e242 = self.fix_e224 - self.fix_e261 = self.fix_e262 - self.fix_e272 = self.fix_e271 - self.fix_e273 = self.fix_e271 - self.fix_e274 = self.fix_e271 - self.fix_e306 = self.fix_e301 - self.fix_e501 = ( - self.fix_long_line_logically if - options and (options.aggressive >= 2 or options.experimental) else - self.fix_long_line_physically) - self.fix_e703 = self.fix_e702 - self.fix_w293 = self.fix_w291 - - def _fix_source(self, results): - try: - (logical_start, logical_end) = _find_logical(self.source) - logical_support = True - except (SyntaxError, tokenize.TokenError): # pragma: no cover - logical_support = False - - completed_lines = set() - for result in sorted(results, key=_priority_key): - if result['line'] in completed_lines: - continue - - fixed_methodname = 'fix_' + result['id'].lower() - if hasattr(self, fixed_methodname): - fix = getattr(self, fixed_methodname) - - line_index = result['line'] - 1 - original_line = self.source[line_index] - - is_logical_fix = len(_get_parameters(fix)) > 2 - if is_logical_fix: - logical = None - if logical_support: - logical = _get_logical(self.source, - result, - logical_start, - logical_end) - if logical and set(range( - logical[0][0] + 1, - logical[1][0] + 1)).intersection( - completed_lines): - continue - - modified_lines = fix(result, logical) - else: - modified_lines = fix(result) - - if modified_lines is None: - # Force logical fixes to report what they modified. - assert not is_logical_fix - - if self.source[line_index] == original_line: - modified_lines = [] - - if modified_lines: - completed_lines.update(modified_lines) - elif modified_lines == []: # Empty list means no fix - if self.options.verbose >= 2: - print( - '---> Not fixing {error} on line {line}'.format( - error=result['id'], line=result['line']), - file=sys.stderr) - else: # We assume one-line fix when None. 
- completed_lines.add(result['line']) - else: - if self.options.verbose >= 3: - print( - "---> '{0}' is not defined.".format(fixed_methodname), - file=sys.stderr) - - info = result['info'].strip() - print('---> {0}:{1}:{2}:{3}'.format(self.filename, - result['line'], - result['column'], - info), - file=sys.stderr) - - def fix(self): - """Return a version of the source code with PEP 8 violations fixed.""" - pep8_options = { - 'ignore': self.options.ignore, - 'select': self.options.select, - 'max_line_length': self.options.max_line_length, - } - results = _execute_pep8(pep8_options, self.source) - - if self.options.verbose: - progress = {} - for r in results: - if r['id'] not in progress: - progress[r['id']] = set() - progress[r['id']].add(r['line']) - print('---> {n} issue(s) to fix {progress}'.format( - n=len(results), progress=progress), file=sys.stderr) - - if self.options.line_range: - start, end = self.options.line_range - results = [r for r in results - if start <= r['line'] <= end] - - self._fix_source(filter_results(source=''.join(self.source), - results=results, - aggressive=self.options.aggressive)) - - if self.options.line_range: - # If number of lines has changed then change line_range. - count = sum(sline.count('\n') - for sline in self.source[start - 1:end]) - self.options.line_range[1] = start + count - 1 - - return ''.join(self.source) - - def _fix_reindent(self, result): - """Fix a badly indented line. - - This is done by adding or removing from its initial indent only. - - """ - num_indent_spaces = int(result['info'].split()[1]) - line_index = result['line'] - 1 - target = self.source[line_index] - - self.source[line_index] = ' ' * num_indent_spaces + target.lstrip() - - def fix_e112(self, result): - """Fix under-indented comments.""" - line_index = result['line'] - 1 - target = self.source[line_index] - - if not target.lstrip().startswith('#'): - # Don't screw with invalid syntax. - return [] - - self.source[line_index] = self.indent_word + target - - def fix_e113(self, result): - """Fix over-indented comments.""" - line_index = result['line'] - 1 - target = self.source[line_index] - - indent = _get_indentation(target) - stripped = target.lstrip() - - if not stripped.startswith('#'): - # Don't screw with invalid syntax. - return [] - - self.source[line_index] = indent[1:] + stripped - - def fix_e125(self, result): - """Fix indentation undistinguish from the next logical line.""" - num_indent_spaces = int(result['info'].split()[1]) - line_index = result['line'] - 1 - target = self.source[line_index] - - spaces_to_add = num_indent_spaces - len(_get_indentation(target)) - indent = len(_get_indentation(target)) - modified_lines = [] - - while len(_get_indentation(self.source[line_index])) >= indent: - self.source[line_index] = (' ' * spaces_to_add + - self.source[line_index]) - modified_lines.append(1 + line_index) # Line indexed at 1. 
- line_index -= 1 - - return modified_lines - - def fix_e131(self, result): - """Fix indentation undistinguish from the next logical line.""" - num_indent_spaces = int(result['info'].split()[1]) - line_index = result['line'] - 1 - target = self.source[line_index] - - spaces_to_add = num_indent_spaces - len(_get_indentation(target)) - - if spaces_to_add >= 0: - self.source[line_index] = (' ' * spaces_to_add + - self.source[line_index]) - else: - offset = abs(spaces_to_add) - self.source[line_index] = self.source[line_index][offset:] - - def fix_e201(self, result): - """Remove extraneous whitespace.""" - line_index = result['line'] - 1 - target = self.source[line_index] - offset = result['column'] - 1 - - fixed = fix_whitespace(target, - offset=offset, - replacement='') - - self.source[line_index] = fixed - - def fix_e224(self, result): - """Remove extraneous whitespace around operator.""" - target = self.source[result['line'] - 1] - offset = result['column'] - 1 - fixed = target[:offset] + target[offset:].replace('\t', ' ') - self.source[result['line'] - 1] = fixed - - def fix_e225(self, result): - """Fix missing whitespace around operator.""" - target = self.source[result['line'] - 1] - offset = result['column'] - 1 - fixed = target[:offset] + ' ' + target[offset:] - - # Only proceed if non-whitespace characters match. - # And make sure we don't break the indentation. - if ( - fixed.replace(' ', '') == target.replace(' ', '') and - _get_indentation(fixed) == _get_indentation(target) - ): - self.source[result['line'] - 1] = fixed - else: - return [] - - def fix_e231(self, result): - """Add missing whitespace.""" - line_index = result['line'] - 1 - target = self.source[line_index] - offset = result['column'] - fixed = target[:offset].rstrip() + ' ' + target[offset:].lstrip() - self.source[line_index] = fixed - - def fix_e251(self, result): - """Remove whitespace around parameter '=' sign.""" - line_index = result['line'] - 1 - target = self.source[line_index] - - # This is necessary since pycodestyle sometimes reports columns that - # goes past the end of the physical line. 
This happens in cases like, - # foo(bar\n=None) - c = min(result['column'] - 1, - len(target) - 1) - - if target[c].strip(): - fixed = target - else: - fixed = target[:c].rstrip() + target[c:].lstrip() - - # There could be an escaped newline - # - # def foo(a=\ - # 1) - if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')): - self.source[line_index] = fixed.rstrip('\n\r \t\\') - self.source[line_index + 1] = self.source[line_index + 1].lstrip() - return [line_index + 1, line_index + 2] # Line indexed at 1 - - self.source[result['line'] - 1] = fixed - - def fix_e262(self, result): - """Fix spacing after comment hash.""" - target = self.source[result['line'] - 1] - offset = result['column'] - - code = target[:offset].rstrip(' \t#') - comment = target[offset:].lstrip(' \t#') - - fixed = code + (' # ' + comment if comment.strip() else '\n') - - self.source[result['line'] - 1] = fixed - - def fix_e271(self, result): - """Fix extraneous whitespace around keywords.""" - line_index = result['line'] - 1 - target = self.source[line_index] - offset = result['column'] - 1 - - fixed = fix_whitespace(target, - offset=offset, - replacement=' ') - - if fixed == target: - return [] - else: - self.source[line_index] = fixed - - def fix_e301(self, result): - """Add missing blank line.""" - cr = '\n' - self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] - - def fix_e302(self, result): - """Add missing 2 blank lines.""" - add_linenum = 2 - int(result['info'].split()[-1]) - cr = '\n' * add_linenum - self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] - - def fix_e303(self, result): - """Remove extra blank lines.""" - delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2 - delete_linenum = max(1, delete_linenum) - - # We need to count because pycodestyle reports an offset line number if - # there are comments. 
- cnt = 0 - line = result['line'] - 2 - modified_lines = [] - while cnt < delete_linenum and line >= 0: - if not self.source[line].strip(): - self.source[line] = '' - modified_lines.append(1 + line) # Line indexed at 1 - cnt += 1 - line -= 1 - - return modified_lines - - def fix_e304(self, result): - """Remove blank line following function decorator.""" - line = result['line'] - 2 - if not self.source[line].strip(): - self.source[line] = '' - - def fix_e305(self, result): - """Add missing 2 blank lines after end of function or class.""" - cr = '\n' - # check comment line - offset = result['line'] - 2 - while True: - if offset < 0: - break - line = self.source[offset].lstrip() - if len(line) == 0: - break - if line[0] != '#': - break - offset -= 1 - offset += 1 - self.source[offset] = cr + self.source[offset] - - def fix_e401(self, result): - """Put imports on separate lines.""" - line_index = result['line'] - 1 - target = self.source[line_index] - offset = result['column'] - 1 - - if not target.lstrip().startswith('import'): - return [] - - indentation = re.split(pattern=r'\bimport\b', - string=target, maxsplit=1)[0] - fixed = (target[:offset].rstrip('\t ,') + '\n' + - indentation + 'import ' + target[offset:].lstrip('\t ,')) - self.source[line_index] = fixed - - def fix_long_line_logically(self, result, logical): - """Try to make lines fit within --max-line-length characters.""" - if ( - not logical or - len(logical[2]) == 1 or - self.source[result['line'] - 1].lstrip().startswith('#') - ): - return self.fix_long_line_physically(result) - - start_line_index = logical[0][0] - end_line_index = logical[1][0] - logical_lines = logical[2] - - previous_line = get_item(self.source, start_line_index - 1, default='') - next_line = get_item(self.source, end_line_index + 1, default='') - - single_line = join_logical_line(''.join(logical_lines)) - - try: - fixed = self.fix_long_line( - target=single_line, - previous_line=previous_line, - next_line=next_line, - original=''.join(logical_lines)) - except (SyntaxError, tokenize.TokenError): - return self.fix_long_line_physically(result) - - if fixed: - for line_index in range(start_line_index, end_line_index + 1): - self.source[line_index] = '' - self.source[start_line_index] = fixed - return range(start_line_index + 1, end_line_index + 1) - else: - return [] - - def fix_long_line_physically(self, result): - """Try to make lines fit within --max-line-length characters.""" - line_index = result['line'] - 1 - target = self.source[line_index] - - previous_line = get_item(self.source, line_index - 1, default='') - next_line = get_item(self.source, line_index + 1, default='') - - try: - fixed = self.fix_long_line( - target=target, - previous_line=previous_line, - next_line=next_line, - original=target) - except (SyntaxError, tokenize.TokenError): - return [] - - if fixed: - self.source[line_index] = fixed - return [line_index + 1] - else: - return [] - - def fix_long_line(self, target, previous_line, - next_line, original): - cache_entry = (target, previous_line, next_line) - if cache_entry in self.long_line_ignore_cache: - return [] - - if target.lstrip().startswith('#'): - # Wrap commented lines. 
- return shorten_comment( - line=target, - max_line_length=self.options.max_line_length, - last_comment=not next_line.lstrip().startswith('#')) - - fixed = get_fixed_long_line( - target=target, - previous_line=previous_line, - original=original, - indent_word=self.indent_word, - max_line_length=self.options.max_line_length, - aggressive=self.options.aggressive, - experimental=self.options.experimental, - verbose=self.options.verbose) - if fixed and not code_almost_equal(original, fixed): - return fixed - else: - self.long_line_ignore_cache.add(cache_entry) - return None - - def fix_e502(self, result): - """Remove extraneous escape of newline.""" - (line_index, _, target) = get_index_offset_contents(result, - self.source) - self.source[line_index] = target.rstrip('\n\r \t\\') + '\n' - - def fix_e701(self, result): - """Put colon-separated compound statement on separate lines.""" - line_index = result['line'] - 1 - target = self.source[line_index] - c = result['column'] - - fixed_source = (target[:c] + '\n' + - _get_indentation(target) + self.indent_word + - target[c:].lstrip('\n\r \t\\')) - self.source[result['line'] - 1] = fixed_source - return [result['line'], result['line'] + 1] - - def fix_e702(self, result, logical): - """Put semicolon-separated compound statement on separate lines.""" - if not logical: - return [] # pragma: no cover - logical_lines = logical[2] - - line_index = result['line'] - 1 - target = self.source[line_index] - - if target.rstrip().endswith('\\'): - # Normalize '1; \\\n2' into '1; 2'. - self.source[line_index] = target.rstrip('\n \r\t\\') - self.source[line_index + 1] = self.source[line_index + 1].lstrip() - return [line_index + 1, line_index + 2] - - if target.rstrip().endswith(';'): - self.source[line_index] = target.rstrip('\n \r\t;') + '\n' - return [line_index + 1] - - offset = result['column'] - 1 - first = target[:offset].rstrip(';').rstrip() - second = (_get_indentation(logical_lines[0]) + - target[offset:].lstrip(';').lstrip()) - - # Find inline comment. - inline_comment = None - if target[offset:].lstrip(';').lstrip()[:2] == '# ': - inline_comment = target[offset:].lstrip(';') - - if inline_comment: - self.source[line_index] = first + inline_comment - else: - self.source[line_index] = first + '\n' + second - return [line_index + 1] - - def fix_e704(self, result): - """Fix multiple statements on one line def""" - (line_index, _, target) = get_index_offset_contents(result, - self.source) - match = STARTSWITH_DEF_REGEX.match(target) - if match: - self.source[line_index] = '{0}\n{1}{2}'.format( - match.group(0), - _get_indentation(target) + self.indent_word, - target[match.end(0):].lstrip()) - - def fix_e711(self, result): - """Fix comparison with None.""" - (line_index, offset, target) = get_index_offset_contents(result, - self.source) - - right_offset = offset + 2 - if right_offset >= len(target): - return [] - - left = target[:offset].rstrip() - center = target[offset:right_offset] - right = target[right_offset:].lstrip() - - if not right.startswith('None'): - return [] - - if center.strip() == '==': - new_center = 'is' - elif center.strip() == '!=': - new_center = 'is not' - else: - return [] - - self.source[line_index] = ' '.join([left, new_center, right]) - - def fix_e712(self, result): - """Fix (trivial case of) comparison with boolean.""" - (line_index, offset, target) = get_index_offset_contents(result, - self.source) - - # Handle very easy "not" special cases. 
- if re.match(r'^\s*if [\w.]+ == False:$', target): - self.source[line_index] = re.sub(r'if ([\w.]+) == False:', - r'if not \1:', target, count=1) - elif re.match(r'^\s*if [\w.]+ != True:$', target): - self.source[line_index] = re.sub(r'if ([\w.]+) != True:', - r'if not \1:', target, count=1) - else: - right_offset = offset + 2 - if right_offset >= len(target): - return [] - - left = target[:offset].rstrip() - center = target[offset:right_offset] - right = target[right_offset:].lstrip() - - # Handle simple cases only. - new_right = None - if center.strip() == '==': - if re.match(r'\bTrue\b', right): - new_right = re.sub(r'\bTrue\b *', '', right, count=1) - elif center.strip() == '!=': - if re.match(r'\bFalse\b', right): - new_right = re.sub(r'\bFalse\b *', '', right, count=1) - - if new_right is None: - return [] - - if new_right[0].isalnum(): - new_right = ' ' + new_right - - self.source[line_index] = left + new_right - - def fix_e713(self, result): - """Fix (trivial case of) non-membership check.""" - (line_index, _, target) = get_index_offset_contents(result, - self.source) - - match = COMPARE_NEGATIVE_REGEX.search(target) - if match: - if match.group(3) == 'in': - pos_start = match.start(1) - self.source[line_index] = '{0}{1} {2} {3} {4}'.format( - target[:pos_start], match.group(2), match.group(1), - match.group(3), target[match.end():]) - - def fix_e714(self, result): - """Fix object identity should be 'is not' case.""" - (line_index, _, target) = get_index_offset_contents(result, - self.source) - - match = COMPARE_NEGATIVE_REGEX.search(target) - if match: - if match.group(3) == 'is': - pos_start = match.start(1) - self.source[line_index] = '{0}{1} {2} {3} {4}'.format( - target[:pos_start], match.group(2), match.group(3), - match.group(1), target[match.end():]) - - def fix_e722(self, result): - """fix bare except""" - (line_index, _, target) = get_index_offset_contents(result, - self.source) - if BARE_EXCEPT_REGEX.search(target): - self.source[line_index] = '{0}{1}'.format( - target[:result['column'] - 1], "except Exception:") - - def fix_e731(self, result): - """Fix do not assign a lambda expression check.""" - (line_index, _, target) = get_index_offset_contents(result, - self.source) - match = LAMBDA_REGEX.search(target) - if match: - end = match.end() - self.source[line_index] = '{0}def {1}({2}): return {3}'.format( - target[:match.start(0)], match.group(1), match.group(2), - target[end:].lstrip()) - - def fix_w291(self, result): - """Remove trailing whitespace.""" - fixed_line = self.source[result['line'] - 1].rstrip() - self.source[result['line'] - 1] = fixed_line + '\n' - - def fix_w391(self, _): - """Remove trailing blank lines.""" - blank_count = 0 - for line in reversed(self.source): - line = line.rstrip() - if line: - break - else: - blank_count += 1 - - original_length = len(self.source) - self.source = self.source[:original_length - blank_count] - return range(1, 1 + original_length) - - def fix_w503(self, result): - (line_index, _, target) = get_index_offset_contents(result, - self.source) - one_string_token = target.split()[0] - try: - ts = generate_tokens(one_string_token) - except tokenize.TokenError: - return - if not _is_binary_operator(ts[0][0], one_string_token): - return - i = target.index(one_string_token) - self.source[line_index] = '{0}{1}'.format( - target[:i], target[i + len(one_string_token):]) - nl = find_newline(self.source[line_index - 1:line_index]) - before_line = self.source[line_index - 1] - bl = before_line.index(nl) - self.source[line_index - 1] = 
'{0} {1}{2}'.format( - before_line[:bl], one_string_token, - before_line[bl:]) - - -def get_index_offset_contents(result, source): - """Return (line_index, column_offset, line_contents).""" - line_index = result['line'] - 1 - return (line_index, - result['column'] - 1, - source[line_index]) - - -def get_fixed_long_line(target, previous_line, original, - indent_word=' ', max_line_length=79, - aggressive=False, experimental=False, verbose=False): - """Break up long line and return result. - - Do this by generating multiple reformatted candidates and then - ranking the candidates to heuristically select the best option. - - """ - indent = _get_indentation(target) - source = target[len(indent):] - assert source.lstrip() == source - - # Check for partial multiline. - tokens = list(generate_tokens(source)) - - candidates = shorten_line( - tokens, source, indent, - indent_word, - max_line_length, - aggressive=aggressive, - experimental=experimental, - previous_line=previous_line) - - # Also sort alphabetically as a tie breaker (for determinism). - candidates = sorted( - sorted(set(candidates).union([target, original])), - key=lambda x: line_shortening_rank( - x, - indent_word, - max_line_length, - experimental=experimental)) - - if verbose >= 4: - print(('-' * 79 + '\n').join([''] + candidates + ['']), - file=wrap_output(sys.stderr, 'utf-8')) - - if candidates: - best_candidate = candidates[0] - # Don't allow things to get longer. - if longest_line_length(best_candidate) > longest_line_length(original): - return None - else: - return best_candidate - - -def longest_line_length(code): - """Return length of longest line.""" - return max(len(line) for line in code.splitlines()) - - -def join_logical_line(logical_line): - """Return single line based on logical line input.""" - indentation = _get_indentation(logical_line) - - return indentation + untokenize_without_newlines( - generate_tokens(logical_line.lstrip())) + '\n' - - -def untokenize_without_newlines(tokens): - """Return source code based on tokens.""" - text = '' - last_row = 0 - last_column = -1 - - for t in tokens: - token_string = t[1] - (start_row, start_column) = t[2] - (end_row, end_column) = t[3] - - if start_row > last_row: - last_column = 0 - if ( - (start_column > last_column or token_string == '\n') and - not text.endswith(' ') - ): - text += ' ' - - if token_string != '\n': - text += token_string - - last_row = end_row - last_column = end_column - - return text.rstrip() - - -def _find_logical(source_lines): - # Make a variable which is the index of all the starts of lines. - logical_start = [] - logical_end = [] - last_newline = True - parens = 0 - for t in generate_tokens(''.join(source_lines)): - if t[0] in [tokenize.COMMENT, tokenize.DEDENT, - tokenize.INDENT, tokenize.NL, - tokenize.ENDMARKER]: - continue - if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]: - last_newline = True - logical_end.append((t[3][0] - 1, t[2][1])) - continue - if last_newline and not parens: - logical_start.append((t[2][0] - 1, t[2][1])) - last_newline = False - if t[0] == tokenize.OP: - if t[1] in '([{': - parens += 1 - elif t[1] in '}])': - parens -= 1 - return (logical_start, logical_end) - - -def _get_logical(source_lines, result, logical_start, logical_end): - """Return the logical line corresponding to the result. - - Assumes input is already E702-clean. 
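The generate-and-rank approach used by get_fixed_long_line above can be restated in a few lines. The sketch below is illustrative only: pick_best and its scoring are assumptions, not the module's actual line_shortening_rank, but they show the same shape (score every candidate, sort deterministically, keep the winner).

def pick_best(candidates, max_line_length=79):
    """Rank candidate reformattings and return the best one (sketch)."""
    def rank(text):
        lines = text.splitlines() or ['']
        # Penalize overflow past the limit, then prefer fewer lines;
        # the text itself is the deterministic tie-breaker, mirroring
        # the alphabetical pre-sort in the original.
        overflow = sum(max(0, len(line) - max_line_length) for line in lines)
        return (overflow, len(lines), text)
    return min(candidates, key=rank)

print(pick_best(['x = f(a, b)', 'x = f(\n    a,\n    b)']))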
- - """ - row = result['line'] - 1 - col = result['column'] - 1 - ls = None - le = None - for i in range(0, len(logical_start), 1): - assert logical_end - x = logical_end[i] - if x[0] > row or (x[0] == row and x[1] > col): - le = x - ls = logical_start[i] - break - if ls is None: - return None - original = source_lines[ls[0]:le[0] + 1] - return ls, le, original - - -def get_item(items, index, default=None): - if 0 <= index < len(items): - return items[index] - else: - return default - - -def reindent(source, indent_size): - """Reindent all lines.""" - reindenter = Reindenter(source) - return reindenter.run(indent_size) - - -def code_almost_equal(a, b): - """Return True if code is similar. - - Ignore whitespace when comparing specific line. - - """ - split_a = split_and_strip_non_empty_lines(a) - split_b = split_and_strip_non_empty_lines(b) - - if len(split_a) != len(split_b): - return False - - for (index, _) in enumerate(split_a): - if ''.join(split_a[index].split()) != ''.join(split_b[index].split()): - return False - - return True - - -def split_and_strip_non_empty_lines(text): - """Return lines split by newline. - - Ignore empty lines. - - """ - return [line.strip() for line in text.splitlines() if line.strip()] - - -def fix_e265(source, aggressive=False): # pylint: disable=unused-argument - """Format block comments.""" - if '#' not in source: - # Optimization. - return source - - ignored_line_numbers = multiline_string_lines( - source, - include_docstrings=True) | set(commented_out_code_lines(source)) - - fixed_lines = [] - sio = io.StringIO(source) - for (line_number, line) in enumerate(sio.readlines(), start=1): - if ( - line.lstrip().startswith('#') and - line_number not in ignored_line_numbers and - not pycodestyle.noqa(line) - ): - indentation = _get_indentation(line) - line = line.lstrip() - - # Normalize beginning if not a shebang. - if len(line) > 1: - pos = next((index for index, c in enumerate(line) - if c != '#')) - if ( - # Leave multiple spaces like '# ' alone. - (line[:pos].count('#') > 1 or line[1].isalnum()) and - # Leave stylistic outlined blocks alone. - not line.rstrip().endswith('#') - ): - line = '# ' + line.lstrip('# \t') - - fixed_lines.append(indentation + line) - else: - fixed_lines.append(line) - - return ''.join(fixed_lines) - - -def refactor(source, fixer_names, ignore=None, filename=''): - """Return refactored code using lib2to3. - - Skip if ignore string is produced in the refactored code. 
- - """ - check_lib2to3() - from lib2to3 import pgen2 - try: - new_text = refactor_with_2to3(source, - fixer_names=fixer_names, - filename=filename) - except (pgen2.parse.ParseError, - SyntaxError, - UnicodeDecodeError, - UnicodeEncodeError): - return source - - if ignore: - if ignore in new_text and ignore not in source: - return source - - return new_text - - -def code_to_2to3(select, ignore): - fixes = set() - for code, fix in CODE_TO_2TO3.items(): - if code_match(code, select=select, ignore=ignore): - fixes |= set(fix) - return fixes - - -def fix_2to3(source, - aggressive=True, select=None, ignore=None, filename=''): - """Fix various deprecated code (via lib2to3).""" - if not aggressive: - return source - - select = select or [] - ignore = ignore or [] - - return refactor(source, - code_to_2to3(select=select, - ignore=ignore), - filename=filename) - - -def fix_w602(source, aggressive=True): - """Fix deprecated form of raising exception.""" - if not aggressive: - return source - - return refactor(source, ['raise'], - ignore='with_traceback') - - -def find_newline(source): - """Return type of newline used in source. - - Input is a list of lines. - - """ - assert not isinstance(source, unicode) - - counter = collections.defaultdict(int) - for line in source: - if line.endswith(CRLF): - counter[CRLF] += 1 - elif line.endswith(CR): - counter[CR] += 1 - elif line.endswith(LF): - counter[LF] += 1 - - return (sorted(counter, key=counter.get, reverse=True) or [LF])[0] - - -def _get_indentword(source): - """Return indentation type.""" - indent_word = ' ' # Default in case source has no indentation - try: - for t in generate_tokens(source): - if t[0] == token.INDENT: - indent_word = t[1] - break - except (SyntaxError, tokenize.TokenError): - pass - return indent_word - - -def _get_indentation(line): - """Return leading whitespace.""" - if line.strip(): - non_whitespace_index = len(line) - len(line.lstrip()) - return line[:non_whitespace_index] - else: - return '' - - -def get_diff_text(old, new, filename): - """Return text of unified diff between old and new.""" - newline = '\n' - diff = difflib.unified_diff( - old, new, - 'original/' + filename, - 'fixed/' + filename, - lineterm=newline) - - text = '' - for line in diff: - text += line - - # Work around missing newline (http://bugs.python.org/issue2142). - if text and not line.endswith(newline): - text += newline + r'\ No newline at end of file' + newline - - return text - - -def _priority_key(pep8_result): - """Key for sorting PEP8 results. - - Global fixes should be done first. This is important for things like - indentation. - - """ - priority = [ - # Fix multiline colon-based before semicolon based. - 'e701', - # Break multiline statements early. - 'e702', - # Things that make lines longer. - 'e225', 'e231', - # Remove extraneous whitespace before breaking lines. - 'e201', - # Shorten whitespace in comment before resorting to wrapping. - 'e262' - ] - middle_index = 10000 - lowest_priority = [ - # We need to shorten lines last since the logical fixer can get in a - # loop, which causes us to exit early. - 'e501' - ] - key = pep8_result['id'].lower() - try: - return priority.index(key) - except ValueError: - try: - return middle_index + lowest_priority.index(key) + 1 - except ValueError: - return middle_index - - -def shorten_line(tokens, source, indentation, indent_word, max_line_length, - aggressive=False, experimental=False, previous_line=''): - """Separate line at OPERATOR. - - Multiple candidates will be yielded. 
- - """ - for candidate in _shorten_line(tokens=tokens, - source=source, - indentation=indentation, - indent_word=indent_word, - aggressive=aggressive, - previous_line=previous_line): - yield candidate - - if aggressive: - for key_token_strings in SHORTEN_OPERATOR_GROUPS: - shortened = _shorten_line_at_tokens( - tokens=tokens, - source=source, - indentation=indentation, - indent_word=indent_word, - key_token_strings=key_token_strings, - aggressive=aggressive) - - if shortened is not None and shortened != source: - yield shortened - - if experimental: - for shortened in _shorten_line_at_tokens_new( - tokens=tokens, - source=source, - indentation=indentation, - max_line_length=max_line_length): - - yield shortened - - -def _shorten_line(tokens, source, indentation, indent_word, - aggressive=False, previous_line=''): - """Separate line at OPERATOR. - - The input is expected to be free of newlines except for inside multiline - strings and at the end. - - Multiple candidates will be yielded. - - """ - for (token_type, - token_string, - start_offset, - end_offset) in token_offsets(tokens): - - if ( - token_type == tokenize.COMMENT and - not is_probably_part_of_multiline(previous_line) and - not is_probably_part_of_multiline(source) and - not source[start_offset + 1:].strip().lower().startswith( - ('noqa', 'pragma:', 'pylint:')) - ): - # Move inline comments to previous line. - first = source[:start_offset] - second = source[start_offset:] - yield (indentation + second.strip() + '\n' + - indentation + first.strip() + '\n') - elif token_type == token.OP and token_string != '=': - # Don't break on '=' after keyword as this violates PEP 8. - - assert token_type != token.INDENT - - first = source[:end_offset] - - second_indent = indentation - if first.rstrip().endswith('('): - second_indent += indent_word - elif '(' in first: - second_indent += ' ' * (1 + first.find('(')) - else: - second_indent += indent_word - - second = (second_indent + source[end_offset:].lstrip()) - if ( - not second.strip() or - second.lstrip().startswith('#') - ): - continue - - # Do not begin a line with a comma - if second.lstrip().startswith(','): - continue - # Do end a line with a dot - if first.rstrip().endswith('.'): - continue - if token_string in '+-*/': - fixed = first + ' \\' + '\n' + second - else: - fixed = first + '\n' + second - - # Only fix if syntax is okay. - if check_syntax(normalize_multiline(fixed) - if aggressive else fixed): - yield indentation + fixed - - -def _is_binary_operator(token_type, text): - return ((token_type == tokenize.OP or text in ['and', 'or']) and - text not in '()[]{},:.;@=%~') - - -# A convenient way to handle tokens. -Token = collections.namedtuple('Token', ['token_type', 'token_string', - 'spos', 'epos', 'line']) - - -class ReformattedLines(object): - - """The reflowed lines of atoms. - - Each part of the line is represented as an "atom." They can be moved - around when need be to get the optimal formatting. 
- - """ - - ########################################################################### - # Private Classes - - class _Indent(object): - - """Represent an indentation in the atom stream.""" - - def __init__(self, indent_amt): - self._indent_amt = indent_amt - - def emit(self): - return ' ' * self._indent_amt - - @property - def size(self): - return self._indent_amt - - class _Space(object): - - """Represent a space in the atom stream.""" - - def emit(self): - return ' ' - - @property - def size(self): - return 1 - - class _LineBreak(object): - - """Represent a line break in the atom stream.""" - - def emit(self): - return '\n' - - @property - def size(self): - return 0 - - def __init__(self, max_line_length): - self._max_line_length = max_line_length - self._lines = [] - self._bracket_depth = 0 - self._prev_item = None - self._prev_prev_item = None - - def __repr__(self): - return self.emit() - - ########################################################################### - # Public Methods - - def add(self, obj, indent_amt, break_after_open_bracket): - if isinstance(obj, Atom): - self._add_item(obj, indent_amt) - return - - self._add_container(obj, indent_amt, break_after_open_bracket) - - def add_comment(self, item): - num_spaces = 2 - if len(self._lines) > 1: - if isinstance(self._lines[-1], self._Space): - num_spaces -= 1 - if len(self._lines) > 2: - if isinstance(self._lines[-2], self._Space): - num_spaces -= 1 - - while num_spaces > 0: - self._lines.append(self._Space()) - num_spaces -= 1 - self._lines.append(item) - - def add_indent(self, indent_amt): - self._lines.append(self._Indent(indent_amt)) - - def add_line_break(self, indent): - self._lines.append(self._LineBreak()) - self.add_indent(len(indent)) - - def add_line_break_at(self, index, indent_amt): - self._lines.insert(index, self._LineBreak()) - self._lines.insert(index + 1, self._Indent(indent_amt)) - - def add_space_if_needed(self, curr_text, equal=False): - if ( - not self._lines or isinstance( - self._lines[-1], (self._LineBreak, self._Indent, self._Space)) - ): - return - - prev_text = unicode(self._prev_item) - prev_prev_text = ( - unicode(self._prev_prev_item) if self._prev_prev_item else '') - - if ( - # The previous item was a keyword or identifier and the current - # item isn't an operator that doesn't require a space. - ((self._prev_item.is_keyword or self._prev_item.is_string or - self._prev_item.is_name or self._prev_item.is_number) and - (curr_text[0] not in '([{.,:}])' or - (curr_text[0] == '=' and equal))) or - - # Don't place spaces around a '.', unless it's in an 'import' - # statement. - ((prev_prev_text != 'from' and prev_text[-1] != '.' and - curr_text != 'import') and - - # Don't place a space before a colon. - curr_text[0] != ':' and - - # Don't split up ending brackets by spaces. - ((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or - - # Put a space after a colon or comma. - prev_text[-1] in ':,' or - - # Put space around '=' if asked to. - (equal and prev_text == '=') or - - # Put spaces around non-unary arithmetic operators. 
- ((self._prev_prev_item and - (prev_text not in '+-' and - (self._prev_prev_item.is_name or - self._prev_prev_item.is_number or - self._prev_prev_item.is_string)) and - prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in'))))) - ): - self._lines.append(self._Space()) - - def previous_item(self): - """Return the previous non-whitespace item.""" - return self._prev_item - - def fits_on_current_line(self, item_extent): - return self.current_size() + item_extent <= self._max_line_length - - def current_size(self): - """The size of the current line minus the indentation.""" - size = 0 - for item in reversed(self._lines): - size += item.size - if isinstance(item, self._LineBreak): - break - - return size - - def line_empty(self): - return (self._lines and - isinstance(self._lines[-1], - (self._LineBreak, self._Indent))) - - def emit(self): - string = '' - for item in self._lines: - if isinstance(item, self._LineBreak): - string = string.rstrip() - string += item.emit() - - return string.rstrip() + '\n' - - ########################################################################### - # Private Methods - - def _add_item(self, item, indent_amt): - """Add an item to the line. - - Reflow the line to get the best formatting after the item is - inserted. The bracket depth indicates if the item is being - inserted inside of a container or not. - - """ - if self._prev_item and self._prev_item.is_string and item.is_string: - # Place consecutive string literals on separate lines. - self._lines.append(self._LineBreak()) - self._lines.append(self._Indent(indent_amt)) - - item_text = unicode(item) - if self._lines and self._bracket_depth: - # Adding the item into a container. - self._prevent_default_initializer_splitting(item, indent_amt) - - if item_text in '.,)]}': - self._split_after_delimiter(item, indent_amt) - - elif self._lines and not self.line_empty(): - # Adding the item outside of a container. - if self.fits_on_current_line(len(item_text)): - self._enforce_space(item) - - else: - # Line break for the new item. - self._lines.append(self._LineBreak()) - self._lines.append(self._Indent(indent_amt)) - - self._lines.append(item) - self._prev_item, self._prev_prev_item = item, self._prev_item - - if item_text in '([{': - self._bracket_depth += 1 - - elif item_text in '}])': - self._bracket_depth -= 1 - assert self._bracket_depth >= 0 - - def _add_container(self, container, indent_amt, break_after_open_bracket): - actual_indent = indent_amt + 1 - - if ( - unicode(self._prev_item) != '=' and - not self.line_empty() and - not self.fits_on_current_line( - container.size + self._bracket_depth + 2) - ): - - if unicode(container)[0] == '(' and self._prev_item.is_name: - # Don't split before the opening bracket of a call. - break_after_open_bracket = True - actual_indent = indent_amt + 4 - elif ( - break_after_open_bracket or - unicode(self._prev_item) not in '([{' - ): - # If the container doesn't fit on the current line and the - # current line isn't empty, place the container on the next - # line. - self._lines.append(self._LineBreak()) - self._lines.append(self._Indent(indent_amt)) - break_after_open_bracket = False - else: - actual_indent = self.current_size() + 1 - break_after_open_bracket = False - - if isinstance(container, (ListComprehension, IfExpression)): - actual_indent = indent_amt - - # Increase the continued indentation only if recursing on a - # container. 
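The fits_on_current_line test above drives a greedy policy: keep appending atoms while they fit, otherwise break and indent. A toy version of that policy (greedy_reflow is illustrative only; the real class additionally tracks bracket depth, spacing rules, and default initializers):

def greedy_reflow(atoms, width, indent='    '):
    """Place atoms on lines of at most `width` columns (toy sketch)."""
    lines = ['']
    for atom in atoms:
        sep = ' ' if lines[-1] else ''
        if lines[-1] and len(lines[-1]) + len(sep) + len(atom) > width:
            # The atom would overflow: start a continuation line.
            lines.append(indent + atom)
        else:
            lines[-1] += sep + atom
    return '\n'.join(lines)

print(greedy_reflow(['x', '=', 'call(a,', 'b,', 'c)'], width=10))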
- container.reflow(self, ' ' * actual_indent, - break_after_open_bracket=break_after_open_bracket) - - def _prevent_default_initializer_splitting(self, item, indent_amt): - """Prevent splitting between a default initializer. - - When there is a default initializer, it's best to keep it all on - the same line. It's nicer and more readable, even if it goes - over the maximum allowable line length. This goes back along the - current line to determine if we have a default initializer, and, - if so, to remove extraneous whitespaces and add a line - break/indent before it if needed. - - """ - if unicode(item) == '=': - # This is the assignment in the initializer. Just remove spaces for - # now. - self._delete_whitespace() - return - - if (not self._prev_item or not self._prev_prev_item or - unicode(self._prev_item) != '='): - return - - self._delete_whitespace() - prev_prev_index = self._lines.index(self._prev_prev_item) - - if ( - isinstance(self._lines[prev_prev_index - 1], self._Indent) or - self.fits_on_current_line(item.size + 1) - ): - # The default initializer is already the only item on this line. - # Don't insert a newline here. - return - - # Replace the space with a newline/indent combo. - if isinstance(self._lines[prev_prev_index - 1], self._Space): - del self._lines[prev_prev_index - 1] - - self.add_line_break_at(self._lines.index(self._prev_prev_item), - indent_amt) - - def _split_after_delimiter(self, item, indent_amt): - """Split the line only after a delimiter.""" - self._delete_whitespace() - - if self.fits_on_current_line(item.size): - return - - last_space = None - for item in reversed(self._lines): - if ( - last_space and - (not isinstance(item, Atom) or not item.is_colon) - ): - break - else: - last_space = None - if isinstance(item, self._Space): - last_space = item - if isinstance(item, (self._LineBreak, self._Indent)): - return - - if not last_space: - return - - self.add_line_break_at(self._lines.index(last_space), indent_amt) - - def _enforce_space(self, item): - """Enforce a space in certain situations. - - There are cases where we will want a space where normally we - wouldn't put one. This just enforces the addition of a space. - - """ - if isinstance(self._lines[-1], - (self._Space, self._LineBreak, self._Indent)): - return - - if not self._prev_item: - return - - item_text = unicode(item) - prev_text = unicode(self._prev_item) - - # Prefer a space around a '.' in an import statement, and between the - # 'import' and '('. - if ( - (item_text == '.' and prev_text == 'from') or - (item_text == 'import' and prev_text == '.') or - (item_text == '(' and prev_text == 'import') - ): - self._lines.append(self._Space()) - - def _delete_whitespace(self): - """Delete all whitespace from the end of the line.""" - while isinstance(self._lines[-1], (self._Space, self._LineBreak, - self._Indent)): - del self._lines[-1] - - -class Atom(object): - - """The smallest unbreakable unit that can be reflowed.""" - - def __init__(self, atom): - self._atom = atom - - def __repr__(self): - return self._atom.token_string - - def __len__(self): - return self.size - - def reflow( - self, reflowed_lines, continued_indent, extent, - break_after_open_bracket=False, - is_list_comp_or_if_expr=False, - next_is_dot=False - ): - if self._atom.token_type == tokenize.COMMENT: - reflowed_lines.add_comment(self) - return - - total_size = extent if extent else self.size - - if self._atom.token_string not in ',:([{}])': - # Some atoms will need an extra 1-sized space token after them. 
- total_size += 1 - - prev_item = reflowed_lines.previous_item() - if ( - not is_list_comp_or_if_expr and - not reflowed_lines.fits_on_current_line(total_size) and - not (next_is_dot and - reflowed_lines.fits_on_current_line(self.size + 1)) and - not reflowed_lines.line_empty() and - not self.is_colon and - not (prev_item and prev_item.is_name and - unicode(self) == '(') - ): - # Start a new line if there is already something on the line and - # adding this atom would make it go over the max line length. - reflowed_lines.add_line_break(continued_indent) - else: - reflowed_lines.add_space_if_needed(unicode(self)) - - reflowed_lines.add(self, len(continued_indent), - break_after_open_bracket) - - def emit(self): - return self.__repr__() - - @property - def is_keyword(self): - return keyword.iskeyword(self._atom.token_string) - - @property - def is_string(self): - return self._atom.token_type == tokenize.STRING - - @property - def is_name(self): - return self._atom.token_type == tokenize.NAME - - @property - def is_number(self): - return self._atom.token_type == tokenize.NUMBER - - @property - def is_comma(self): - return self._atom.token_string == ',' - - @property - def is_colon(self): - return self._atom.token_string == ':' - - @property - def size(self): - return len(self._atom.token_string) - - -class Container(object): - - """Base class for all container types.""" - - def __init__(self, items): - self._items = items - - def __repr__(self): - string = '' - last_was_keyword = False - - for item in self._items: - if item.is_comma: - string += ', ' - elif item.is_colon: - string += ': ' - else: - item_string = unicode(item) - if ( - string and - (last_was_keyword or - (not string.endswith(tuple('([{,.:}]) ')) and - not item_string.startswith(tuple('([{,.:}])')))) - ): - string += ' ' - string += item_string - - last_was_keyword = item.is_keyword - return string - - def __iter__(self): - for element in self._items: - yield element - - def __getitem__(self, idx): - return self._items[idx] - - def reflow(self, reflowed_lines, continued_indent, - break_after_open_bracket=False): - last_was_container = False - for (index, item) in enumerate(self._items): - next_item = get_item(self._items, index + 1) - - if isinstance(item, Atom): - is_list_comp_or_if_expr = ( - isinstance(self, (ListComprehension, IfExpression))) - item.reflow(reflowed_lines, continued_indent, - self._get_extent(index), - is_list_comp_or_if_expr=is_list_comp_or_if_expr, - next_is_dot=(next_item and - unicode(next_item) == '.')) - if last_was_container and item.is_comma: - reflowed_lines.add_line_break(continued_indent) - last_was_container = False - else: # isinstance(item, Container) - reflowed_lines.add(item, len(continued_indent), - break_after_open_bracket) - last_was_container = not isinstance(item, (ListComprehension, - IfExpression)) - - if ( - break_after_open_bracket and index == 0 and - # Prefer to keep empty containers together instead of - # separating them. 
- unicode(item) == self.open_bracket and - (not next_item or unicode(next_item) != self.close_bracket) and - (len(self._items) != 3 or not isinstance(next_item, Atom)) - ): - reflowed_lines.add_line_break(continued_indent) - break_after_open_bracket = False - else: - next_next_item = get_item(self._items, index + 2) - if ( - unicode(item) not in ['.', '%', 'in'] and - next_item and not isinstance(next_item, Container) and - unicode(next_item) != ':' and - next_next_item and (not isinstance(next_next_item, Atom) or - unicode(next_item) == 'not') and - not reflowed_lines.line_empty() and - not reflowed_lines.fits_on_current_line( - self._get_extent(index + 1) + 2) - ): - reflowed_lines.add_line_break(continued_indent) - - def _get_extent(self, index): - """The extent of the full element. - - E.g., the length of a function call or keyword. - - """ - extent = 0 - prev_item = get_item(self._items, index - 1) - seen_dot = prev_item and unicode(prev_item) == '.' - while index < len(self._items): - item = get_item(self._items, index) - index += 1 - - if isinstance(item, (ListComprehension, IfExpression)): - break - - if isinstance(item, Container): - if prev_item and prev_item.is_name: - if seen_dot: - extent += 1 - else: - extent += item.size - - prev_item = item - continue - elif (unicode(item) not in ['.', '=', ':', 'not'] and - not item.is_name and not item.is_string): - break - - if unicode(item) == '.': - seen_dot = True - - extent += item.size - prev_item = item - - return extent - - @property - def is_string(self): - return False - - @property - def size(self): - return len(self.__repr__()) - - @property - def is_keyword(self): - return False - - @property - def is_name(self): - return False - - @property - def is_comma(self): - return False - - @property - def is_colon(self): - return False - - @property - def open_bracket(self): - return None - - @property - def close_bracket(self): - return None - - -class Tuple(Container): - - """A high-level representation of a tuple.""" - - @property - def open_bracket(self): - return '(' - - @property - def close_bracket(self): - return ')' - - -class List(Container): - - """A high-level representation of a list.""" - - @property - def open_bracket(self): - return '[' - - @property - def close_bracket(self): - return ']' - - -class DictOrSet(Container): - - """A high-level representation of a dictionary or set.""" - - @property - def open_bracket(self): - return '{' - - @property - def close_bracket(self): - return '}' - - -class ListComprehension(Container): - - """A high-level representation of a list comprehension.""" - - @property - def size(self): - length = 0 - for item in self._items: - if isinstance(item, IfExpression): - break - length += item.size - return length - - -class IfExpression(Container): - - """A high-level representation of an if-expression.""" - - -def _parse_container(tokens, index, for_or_if=None): - """Parse a high-level container, such as a list, tuple, etc.""" - - # Store the opening bracket. - items = [Atom(Token(*tokens[index]))] - index += 1 - - num_tokens = len(tokens) - while index < num_tokens: - tok = Token(*tokens[index]) - - if tok.token_string in ',)]}': - # First check if we're at the end of a list comprehension or - # if-expression. Don't add the ending token as part of the list - # comprehension or if-expression, because they aren't part of those - # constructs. 
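_parse_container above is essentially a recursive-descent bracket matcher. Stripped of the list-comprehension and if-expression handling, the idea reduces to the following (parse and PAIRS are illustrative names):

PAIRS = {'(': ')', '[': ']', '{': '}'}

def parse(tokens, index=0, closing=None):
    """Group a flat token list into nested lists per bracket pair."""
    items = []
    while index < len(tokens):
        tok = tokens[index]
        if tok == closing:
            # Close the container opened by the caller.
            return items, index
        if tok in PAIRS:
            # A sub-container begins: recurse past its opening bracket.
            sub, index = parse(tokens, index + 1, PAIRS[tok])
            items.append(sub)
        else:
            items.append(tok)
        index += 1
    return items, index

print(parse(['f', '(', 'a', ',', '[', '1', ']', ')'])[0])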
- if for_or_if == 'for': - return (ListComprehension(items), index - 1) - - elif for_or_if == 'if': - return (IfExpression(items), index - 1) - - # We've reached the end of a container. - items.append(Atom(tok)) - - # If not, then we are at the end of a container. - if tok.token_string == ')': - # The end of a tuple. - return (Tuple(items), index) - - elif tok.token_string == ']': - # The end of a list. - return (List(items), index) - - elif tok.token_string == '}': - # The end of a dictionary or set. - return (DictOrSet(items), index) - - elif tok.token_string in '([{': - # A sub-container is being defined. - (container, index) = _parse_container(tokens, index) - items.append(container) - - elif tok.token_string == 'for': - (container, index) = _parse_container(tokens, index, 'for') - items.append(container) - - elif tok.token_string == 'if': - (container, index) = _parse_container(tokens, index, 'if') - items.append(container) - - else: - items.append(Atom(tok)) - - index += 1 - - return (None, None) - - -def _parse_tokens(tokens): - """Parse the tokens. - - This converts the tokens into a form where we can manipulate them - more easily. - - """ - - index = 0 - parsed_tokens = [] - - num_tokens = len(tokens) - while index < num_tokens: - tok = Token(*tokens[index]) - - assert tok.token_type != token.INDENT - if tok.token_type == tokenize.NEWLINE: - # There's only one newline and it's at the end. - break - - if tok.token_string in '([{': - (container, index) = _parse_container(tokens, index) - if not container: - return None - parsed_tokens.append(container) - else: - parsed_tokens.append(Atom(tok)) - - index += 1 - - return parsed_tokens - - -def _reflow_lines(parsed_tokens, indentation, max_line_length, - start_on_prefix_line): - """Reflow the lines so that it looks nice.""" - - if unicode(parsed_tokens[0]) == 'def': - # A function definition gets indented a bit more. - continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE - else: - continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE - - break_after_open_bracket = not start_on_prefix_line - - lines = ReformattedLines(max_line_length) - lines.add_indent(len(indentation.lstrip('\r\n'))) - - if not start_on_prefix_line: - # If splitting after the opening bracket will cause the first element - # to be aligned weirdly, don't try it. - first_token = get_item(parsed_tokens, 0) - second_token = get_item(parsed_tokens, 1) - - if ( - first_token and second_token and - unicode(second_token)[0] == '(' and - len(indentation) + len(first_token) + 1 == len(continued_indent) - ): - return None - - for item in parsed_tokens: - lines.add_space_if_needed(unicode(item), equal=True) - - save_continued_indent = continued_indent - if start_on_prefix_line and isinstance(item, Container): - start_on_prefix_line = False - continued_indent = ' ' * (lines.current_size() + 1) - - item.reflow(lines, continued_indent, break_after_open_bracket) - continued_indent = save_continued_indent - - return lines.emit() - - -def _shorten_line_at_tokens_new(tokens, source, indentation, - max_line_length): - """Shorten the line taking its length into account. - - The input is expected to be free of newlines except for inside - multiline strings and at the end. - - """ - # Yield the original source so to see if it's a better choice than the - # shortened candidate lines we generate here. - yield indentation + source - - parsed_tokens = _parse_tokens(tokens) - - if parsed_tokens: - # Perform two reflows. The first one starts on the same line as the - # prefix. 
The second starts on the line after the prefix. - fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, - start_on_prefix_line=True) - if fixed and check_syntax(normalize_multiline(fixed.lstrip())): - yield fixed - - fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, - start_on_prefix_line=False) - if fixed and check_syntax(normalize_multiline(fixed.lstrip())): - yield fixed - - -def _shorten_line_at_tokens(tokens, source, indentation, indent_word, - key_token_strings, aggressive): - """Separate line by breaking at tokens in key_token_strings. - - The input is expected to be free of newlines except for inside - multiline strings and at the end. - - """ - offsets = [] - for (index, _t) in enumerate(token_offsets(tokens)): - (token_type, - token_string, - start_offset, - end_offset) = _t - - assert token_type != token.INDENT - - if token_string in key_token_strings: - # Do not break in containers with zero or one items. - unwanted_next_token = { - '(': ')', - '[': ']', - '{': '}'}.get(token_string) - if unwanted_next_token: - if ( - get_item(tokens, - index + 1, - default=[None, None])[1] == unwanted_next_token or - get_item(tokens, - index + 2, - default=[None, None])[1] == unwanted_next_token - ): - continue - - if ( - index > 2 and token_string == '(' and - tokens[index - 1][1] in ',(%[' - ): - # Don't split after a tuple start, or before a tuple start if - # the tuple is in a list. - continue - - if end_offset < len(source) - 1: - # Don't split right before newline. - offsets.append(end_offset) - else: - # Break at adjacent strings. These were probably meant to be on - # separate lines in the first place. - previous_token = get_item(tokens, index - 1) - if ( - token_type == tokenize.STRING and - previous_token and previous_token[0] == tokenize.STRING - ): - offsets.append(start_offset) - - current_indent = None - fixed = None - for line in split_at_offsets(source, offsets): - if fixed: - fixed += '\n' + current_indent + line - - for symbol in '([{': - if line.endswith(symbol): - current_indent += indent_word - else: - # First line. - fixed = line - assert not current_indent - current_indent = indent_word - - assert fixed is not None - - if check_syntax(normalize_multiline(fixed) - if aggressive > 1 else fixed): - return indentation + fixed - else: - return None - - -def token_offsets(tokens): - """Yield tokens and offsets.""" - end_offset = 0 - previous_end_row = 0 - previous_end_column = 0 - for t in tokens: - token_type = t[0] - token_string = t[1] - (start_row, start_column) = t[2] - (end_row, end_column) = t[3] - - # Account for the whitespace between tokens. - end_offset += start_column - if previous_end_row == start_row: - end_offset -= previous_end_column - - # Record the start offset of the token. - start_offset = end_offset - - # Account for the length of the token itself. - end_offset += len(token_string) - - yield (token_type, - token_string, - start_offset, - end_offset) - - previous_end_row = end_row - previous_end_column = end_column - - -def normalize_multiline(line): - """Normalize multiline-related code that will cause syntax error. - - This is for purposes of checking syntax. 
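token_offsets above converts tokenize's (row, column) positions into flat character offsets so the source can be cut at chosen tokens. The consuming side looks like this sketch, where split_at stands in for the module's split_at_offsets helper:

def split_at(source, offsets):
    """Cut source at each character offset and return the pieces."""
    pieces, start = [], 0
    for offset in sorted(set(offsets)):
        if 0 < offset < len(source):
            pieces.append(source[start:offset])
            start = offset
    pieces.append(source[start:])
    return pieces

assert split_at('alpha, beta, gamma', [6, 12]) == \
    ['alpha,', ' beta,', ' gamma']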
- - """ - if line.startswith('def ') and line.rstrip().endswith(':'): - return line + ' pass' - elif line.startswith('return '): - return 'def _(): ' + line - elif line.startswith('@'): - return line + 'def _(): pass' - elif line.startswith('class '): - return line + ' pass' - elif line.startswith(('if ', 'elif ', 'for ', 'while ')): - return line + ' pass' - else: - return line - - -def fix_whitespace(line, offset, replacement): - """Replace whitespace at offset and return fixed line.""" - # Replace escaped newlines too - left = line[:offset].rstrip('\n\r \t\\') - right = line[offset:].lstrip('\n\r \t\\') - if right.startswith('#'): - return line - else: - return left + replacement + right - - -def _execute_pep8(pep8_options, source): - """Execute pycodestyle via python method calls.""" - class QuietReport(pycodestyle.BaseReport): - - """Version of checker that does not print.""" - - def __init__(self, options): - super(QuietReport, self).__init__(options) - self.__full_error_results = [] - - def error(self, line_number, offset, text, check): - """Collect errors.""" - code = super(QuietReport, self).error(line_number, - offset, - text, - check) - if code: - self.__full_error_results.append( - {'id': code, - 'line': line_number, - 'column': offset + 1, - 'info': text}) - - def full_error_results(self): - """Return error results in detail. - - Results are in the form of a list of dictionaries. Each - dictionary contains 'id', 'line', 'column', and 'info'. - - """ - return self.__full_error_results - - checker = pycodestyle.Checker('', lines=source, reporter=QuietReport, - **pep8_options) - checker.check_all() - return checker.report.full_error_results() - - -def _remove_leading_and_normalize(line): - return line.lstrip().rstrip(CR + LF) + '\n' - - -class Reindenter(object): - - """Reindents badly-indented code to uniformly use four-space indentation. - - Released to the public domain, by Tim Peters, 03 October 2000. - - """ - - def __init__(self, input_text): - sio = io.StringIO(input_text) - source_lines = sio.readlines() - - self.string_content_line_numbers = multiline_string_lines(input_text) - - # File lines, rstripped & tab-expanded. Dummy at start is so - # that we can use tokenize's 1-based line numbering easily. - # Note that a line is all-blank iff it is a newline. - self.lines = [] - for line_number, line in enumerate(source_lines, start=1): - # Do not modify if inside a multiline string. - if line_number in self.string_content_line_numbers: - self.lines.append(line) - else: - # Only expand leading tabs. - self.lines.append(_get_indentation(line).expandtabs() + - _remove_leading_and_normalize(line)) - - self.lines.insert(0, None) - self.index = 1 # index into self.lines of next line - self.input_text = input_text - - def run(self, indent_size=DEFAULT_INDENT_SIZE): - """Fix indentation and return modified line numbers. - - Line numbers are indexed at 1. - - """ - if indent_size < 1: - return self.input_text - - try: - stats = _reindent_stats(tokenize.generate_tokens(self.getline)) - except (SyntaxError, tokenize.TokenError): - return self.input_text - # Remove trailing empty lines. - lines = self.lines - # Sentinel. - stats.append((len(lines), 0)) - # Map count of leading spaces to # we want. - have2want = {} - # Program after transformation. - after = [] - # Copy over initial empty lines -- there's nothing to do until - # we see a line with *something* on it. 
- i = stats[0][0] - after.extend(lines[1:i]) - for i in range(len(stats) - 1): - thisstmt, thislevel = stats[i] - nextstmt = stats[i + 1][0] - have = _leading_space_count(lines[thisstmt]) - want = thislevel * indent_size - if want < 0: - # A comment line. - if have: - # An indented comment line. If we saw the same - # indentation before, reuse what it most recently - # mapped to. - want = have2want.get(have, -1) - if want < 0: - # Then it probably belongs to the next real stmt. - for j in range(i + 1, len(stats) - 1): - jline, jlevel = stats[j] - if jlevel >= 0: - if have == _leading_space_count(lines[jline]): - want = jlevel * indent_size - break - if want < 0: # Maybe it's a hanging - # comment like this one, - # in which case we should shift it like its base - # line got shifted. - for j in range(i - 1, -1, -1): - jline, jlevel = stats[j] - if jlevel >= 0: - want = (have + _leading_space_count( - after[jline - 1]) - - _leading_space_count(lines[jline])) - break - if want < 0: - # Still no luck -- leave it alone. - want = have - else: - want = 0 - assert want >= 0 - have2want[have] = want - diff = want - have - if diff == 0 or have == 0: - after.extend(lines[thisstmt:nextstmt]) - else: - for line_number, line in enumerate(lines[thisstmt:nextstmt], - start=thisstmt): - if line_number in self.string_content_line_numbers: - after.append(line) - elif diff > 0: - if line == '\n': - after.append(line) - else: - after.append(' ' * diff + line) - else: - remove = min(_leading_space_count(line), -diff) - after.append(line[remove:]) - - return ''.join(after) - - def getline(self): - """Line-getter for tokenize.""" - if self.index >= len(self.lines): - line = '' - else: - line = self.lines[self.index] - self.index += 1 - return line - - -def _reindent_stats(tokens): - """Return list of (lineno, indentlevel) pairs. - - One for each stmt and comment line. indentlevel is -1 for comment - lines, as a signal that tokenize doesn't know what to do about them; - indeed, they're our headache! - - """ - find_stmt = 1 # Next token begins a fresh stmt? - level = 0 # Current indent level. - stats = [] - - for t in tokens: - token_type = t[0] - sline = t[2][0] - line = t[4] - - if token_type == tokenize.NEWLINE: - # A program statement, or ENDMARKER, will eventually follow, - # after some (possibly empty) run of tokens of the form - # (NL | COMMENT)* (INDENT | DEDENT+)? - find_stmt = 1 - - elif token_type == tokenize.INDENT: - find_stmt = 1 - level += 1 - - elif token_type == tokenize.DEDENT: - find_stmt = 1 - level -= 1 - - elif token_type == tokenize.COMMENT: - if find_stmt: - stats.append((sline, -1)) - # But we're still looking for a new stmt, so leave - # find_stmt alone. - - elif token_type == tokenize.NL: - pass - - elif find_stmt: - # This is the first "real token" following a NEWLINE, so it - # must be the first token of the next program statement, or an - # ENDMARKER. - find_stmt = 0 - if line: # Not endmarker. - stats.append((sline, level)) - - return stats - - -def _leading_space_count(line): - """Return number of leading spaces in line.""" - i = 0 - while i < len(line) and line[i] == ' ': - i += 1 - return i - - -def refactor_with_2to3(source_text, fixer_names, filename=''): - """Use lib2to3 to refactor the source. - - Return the refactored source code. 
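_reindent_stats above walks the token stream, bumping a level counter on INDENT/DEDENT and recording one (line, level) pair per statement. A cut-down version using the stdlib tokenizer directly:

import io
import tokenize

def indent_levels(source):
    """Return (line_number, indent_level) pairs for each statement."""
    level, stats = 0, []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.INDENT:
            level += 1
        elif tok.type == tokenize.DEDENT:
            level -= 1
        elif tok.type == tokenize.NEWLINE:
            # One logical statement ended on this physical line.
            stats.append((tok.start[0], level))
    return stats

print(indent_levels('def f(x):\n    if x:\n        return 1\n'))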
- - """ - from lib2to3.refactor import RefactoringTool - fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names] - tool = RefactoringTool(fixer_names=fixers, explicit=fixers) - - from lib2to3.pgen2 import tokenize as lib2to3_tokenize - try: - # The name parameter is necessary particularly for the "import" fixer. - return unicode(tool.refactor_string(source_text, name=filename)) - except lib2to3_tokenize.TokenError: - return source_text - - -def check_syntax(code): - """Return True if syntax is okay.""" - try: - return compile(code, '', 'exec') - except (SyntaxError, TypeError, UnicodeDecodeError): - return False - - -def filter_results(source, results, aggressive): - """Filter out spurious reports from pycodestyle. - - If aggressive is True, we allow possibly unsafe fixes (E711, E712). - - """ - non_docstring_string_line_numbers = multiline_string_lines( - source, include_docstrings=False) - all_string_line_numbers = multiline_string_lines( - source, include_docstrings=True) - - commented_out_code_line_numbers = commented_out_code_lines(source) - - has_e901 = any(result['id'].lower() == 'e901' for result in results) - - for r in results: - issue_id = r['id'].lower() - - if r['line'] in non_docstring_string_line_numbers: - if issue_id.startswith(('e1', 'e501', 'w191')): - continue - - if r['line'] in all_string_line_numbers: - if issue_id in ['e501']: - continue - - # We must offset by 1 for lines that contain the trailing contents of - # multiline strings. - if not aggressive and (r['line'] + 1) in all_string_line_numbers: - # Do not modify multiline strings in non-aggressive mode. Remove - # trailing whitespace could break doctests. - if issue_id.startswith(('w29', 'w39')): - continue - - if aggressive <= 0: - if issue_id.startswith(('e711', 'e72', 'w6')): - continue - - if aggressive <= 1: - if issue_id.startswith(('e712', 'e713', 'e714', 'w5')): - continue - - if aggressive <= 2: - if issue_id.startswith(('e704', 'w5')): - continue - - if r['line'] in commented_out_code_line_numbers: - if issue_id.startswith(('e26', 'e501')): - continue - - # Do not touch indentation if there is a token error caused by - # incomplete multi-line statement. Otherwise, we risk screwing up the - # indentation. - if has_e901: - if issue_id.startswith(('e1', 'e7')): - continue - - yield r - - -def multiline_string_lines(source, include_docstrings=False): - """Return line numbers that are within multiline strings. - - The line numbers are indexed at 1. - - Docstrings are ignored. - - """ - line_numbers = set() - previous_token_type = '' - try: - for t in generate_tokens(source): - token_type = t[0] - start_row = t[2][0] - end_row = t[3][0] - - if token_type == tokenize.STRING and start_row != end_row: - if ( - include_docstrings or - previous_token_type != tokenize.INDENT - ): - # We increment by one since we want the contents of the - # string. - line_numbers |= set(range(1 + start_row, 1 + end_row)) - - previous_token_type = token_type - except (SyntaxError, tokenize.TokenError): - pass - - return line_numbers - - -def commented_out_code_lines(source): - """Return line numbers of comments that are likely code. - - Commented-out code is bad practice, but modifying it just adds even - more clutter. - - """ - line_numbers = [] - try: - for t in generate_tokens(source): - token_type = t[0] - token_string = t[1] - start_row = t[2][0] - line = t[4] - - # Ignore inline comments. 
- if not line.lstrip().startswith('#'): - continue - - if token_type == tokenize.COMMENT: - stripped_line = token_string.lstrip('#').strip() - if ( - ' ' in stripped_line and - '#' not in stripped_line and - check_syntax(stripped_line) - ): - line_numbers.append(start_row) - except (SyntaxError, tokenize.TokenError): - pass - - return line_numbers - - -def shorten_comment(line, max_line_length, last_comment=False): - """Return trimmed or split long comment line. - - If there are no comments immediately following it, do a text wrap. - Doing this wrapping on all comments in general would lead to jagged - comment text. - - """ - assert len(line) > max_line_length - line = line.rstrip() - - # PEP 8 recommends 72 characters for comment text. - indentation = _get_indentation(line) + '# ' - max_line_length = min(max_line_length, - len(indentation) + 72) - - MIN_CHARACTER_REPEAT = 5 - if ( - len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and - not line[-1].isalnum() - ): - # Trim comments that end with things like --------- - return line[:max_line_length] + '\n' - elif last_comment and re.match(r'\s*#+\s*\w+', line): - split_lines = textwrap.wrap(line.lstrip(' \t#'), - initial_indent=indentation, - subsequent_indent=indentation, - width=max_line_length, - break_long_words=False, - break_on_hyphens=False) - return '\n'.join(split_lines) + '\n' - else: - return line + '\n' - - -def normalize_line_endings(lines, newline): - """Return fixed line endings. - - All lines will be modified to use the most common line ending. - - """ - return [line.rstrip('\n\r') + newline for line in lines] - - -def mutual_startswith(a, b): - return b.startswith(a) or a.startswith(b) - - -def code_match(code, select, ignore): - if ignore: - assert not isinstance(ignore, unicode) - for ignored_code in [c.strip() for c in ignore]: - if mutual_startswith(code.lower(), ignored_code.lower()): - return False - - if select: - assert not isinstance(select, unicode) - for selected_code in [c.strip() for c in select]: - if mutual_startswith(code.lower(), selected_code.lower()): - return True - return False - - return True - - -def fix_code(source, options=None, encoding=None, apply_config=False): - """Return fixed source code. - - "encoding" will be used to decode "source" if it is a byte string. - - """ - options = _get_options(options, apply_config) - - if not isinstance(source, unicode): - source = source.decode(encoding or get_encoding()) - - sio = io.StringIO(source) - return fix_lines(sio.readlines(), options=options) - - -def _get_options(raw_options, apply_config): - """Return parsed options.""" - if not raw_options: - return parse_args([''], apply_config=apply_config) - - if isinstance(raw_options, dict): - options = parse_args([''], apply_config=apply_config) - for name, value in raw_options.items(): - if not hasattr(options, name): - raise ValueError("No such option '{}'".format(name)) - - # Check for very basic type errors. - expected_type = type(getattr(options, name)) - if not isinstance(expected_type, (str, unicode)): - if isinstance(value, (str, unicode)): - raise ValueError( - "Option '{}' should not be a string".format(name)) - setattr(options, name, value) - else: - options = raw_options - - return options - - -def fix_lines(source_lines, options, filename=''): - """Return fixed source code.""" - # Transform everything to line feed. Then change them back to original - # before returning fixed source code. 
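code_match and mutual_startswith above implement prefix matching in both directions, so a select/ignore entry like 'E7' covers 'E712' and vice versa. Restated stand-alone (matches is an illustrative wrapper around the same logic):

def mutual_startswith(a, b):
    return b.startswith(a) or a.startswith(b)

def matches(code, select=(), ignore=()):
    """Decide whether a pep8 code passes the select/ignore filters."""
    code = code.lower()
    if any(mutual_startswith(code, c.lower()) for c in ignore):
        return False
    if select:
        return any(mutual_startswith(code, c.lower()) for c in select)
    return True

assert matches('E712', select=['E7'])
assert not matches('E712', ignore=['E71'])
assert matches('W291')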
- original_newline = find_newline(source_lines) - tmp_source = ''.join(normalize_line_endings(source_lines, '\n')) - - # Keep a history to break out of cycles. - previous_hashes = set() - - if options.line_range: - # Disable "apply_local_fixes()" for now due to issue #175. - fixed_source = tmp_source - else: - # Apply global fixes only once (for efficiency). - fixed_source = apply_global_fixes(tmp_source, - options, - filename=filename) - - passes = 0 - long_line_ignore_cache = set() - while hash(fixed_source) not in previous_hashes: - if options.pep8_passes >= 0 and passes > options.pep8_passes: - break - passes += 1 - - previous_hashes.add(hash(fixed_source)) - - tmp_source = copy.copy(fixed_source) - - fix = FixPEP8( - filename, - options, - contents=tmp_source, - long_line_ignore_cache=long_line_ignore_cache) - - fixed_source = fix.fix() - - sio = io.StringIO(fixed_source) - return ''.join(normalize_line_endings(sio.readlines(), original_newline)) - - -def fix_file(filename, options=None, output=None, apply_config=False): - if not options: - options = parse_args([filename], apply_config=apply_config) - - original_source = readlines_from_file(filename) - - fixed_source = original_source - - if options.in_place or output: - encoding = detect_encoding(filename) - - if output: - output = LineEndingWrapper(wrap_output(output, encoding=encoding)) - - fixed_source = fix_lines(fixed_source, options, filename=filename) - - if options.diff: - new = io.StringIO(fixed_source) - new = new.readlines() - diff = get_diff_text(original_source, new, filename) - if output: - output.write(diff) - output.flush() - else: - return diff - elif options.in_place: - fp = open_with_encoding(filename, encoding=encoding, mode='w') - fp.write(fixed_source) - fp.close() - else: - if output: - output.write(fixed_source) - output.flush() - else: - return fixed_source - - -def global_fixes(): - """Yield multiple (code, function) tuples.""" - for function in list(globals().values()): - if inspect.isfunction(function): - arguments = _get_parameters(function) - if arguments[:1] != ['source']: - continue - - code = extract_code_from_function(function) - if code: - yield (code, function) - - -def _get_parameters(function): - # pylint: disable=deprecated-method - if sys.version_info >= (3, 3): - # We need to match "getargspec()", which includes "self" as the first - # value for methods. - # https://bugs.python.org/issue17481#msg209469 - if inspect.ismethod(function): - function = function.__func__ - - return list(inspect.signature(function).parameters) - else: - return inspect.getargspec(function)[0] - - -def apply_global_fixes(source, options, where='global', filename=''): - """Run global fixes on source code. - - These are fixes that only need be done once (unlike those in - FixPEP8, which are dependent on pycodestyle). 
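global_fixes above discovers fixers by introspection: any module-level function named fix_<code> whose first parameter is 'source' is treated as a global fix. The convention in miniature (fix_e265 here is a stub for demonstration, not the real fixer):

import inspect

def fix_e265(source, aggressive=False):
    return source

def discover(namespace):
    """Yield (code, function) pairs following the fix_<code> convention."""
    for name, obj in namespace.items():
        if (inspect.isfunction(obj) and name.startswith('fix_') and
                list(inspect.signature(obj).parameters)[:1] == ['source']):
            yield name[len('fix_'):].upper(), obj

print(sorted(code for code, _ in discover(dict(globals()))))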
- - """ - if any(code_match(code, select=options.select, ignore=options.ignore) - for code in ['E101', 'E111']): - source = reindent(source, - indent_size=options.indent_size) - - for (code, function) in global_fixes(): - if code_match(code, select=options.select, ignore=options.ignore): - if options.verbose: - print('---> Applying {0} fix for {1}'.format(where, - code.upper()), - file=sys.stderr) - source = function(source, - aggressive=options.aggressive) - - source = fix_2to3(source, - aggressive=options.aggressive, - select=options.select, - ignore=options.ignore, - filename=filename) - - return source - - -def extract_code_from_function(function): - """Return code handled by function.""" - if not function.__name__.startswith('fix_'): - return None - - code = re.sub('^fix_', '', function.__name__) - if not code: - return None - - try: - int(code[1:]) - except ValueError: - return None - - return code - - -def _get_package_version(): - packages = ["pycodestyle: {0}".format(pycodestyle.__version__)] - return ", ".join(packages) - - -def create_parser(): - """Return command-line parser.""" - # Do import locally to be friendly to those who use autopep8 as a library - # and are supporting Python 2.6. - import argparse - - parser = argparse.ArgumentParser(description=docstring_summary(__doc__), - prog='autopep8') - parser.add_argument('--version', action='version', - version='%(prog)s {0} ({1})'.format( - __version__, _get_package_version())) - parser.add_argument('-v', '--verbose', action='count', - default=0, - help='print verbose messages; ' - 'multiple -v result in more verbose messages') - parser.add_argument('-d', '--diff', action='store_true', - help='print the diff for the fixed source') - parser.add_argument('-i', '--in-place', action='store_true', - help='make changes to files in place') - parser.add_argument('--global-config', metavar='filename', - default=DEFAULT_CONFIG, - help='path to a global pep8 config file; if this file ' - 'does not exist then this is ignored ' - '(default: {0})'.format(DEFAULT_CONFIG)) - parser.add_argument('--ignore-local-config', action='store_true', - help="don't look for and apply local config files; " - 'if not passed, defaults are updated with any ' - "config files in the project's root directory") - parser.add_argument('-r', '--recursive', action='store_true', - help='run recursively over directories; ' - 'must be used with --in-place or --diff') - parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1, - help='number of parallel jobs; ' - 'match CPU count if value is less than 1') - parser.add_argument('-p', '--pep8-passes', metavar='n', - default=-1, type=int, - help='maximum number of additional pep8 passes ' - '(default: infinite)') - parser.add_argument('-a', '--aggressive', action='count', default=0, - help='enable non-whitespace changes; ' - 'multiple -a result in more aggressive changes') - parser.add_argument('--experimental', action='store_true', - help='enable experimental fixes') - parser.add_argument('--exclude', metavar='globs', - help='exclude file/directory names that match these ' - 'comma-separated globs') - parser.add_argument('--list-fixes', action='store_true', - help='list codes for fixes; ' - 'used by --ignore and --select') - parser.add_argument('--ignore', metavar='errors', default='', - help='do not fix these errors/warnings ' - '(default: {0})'.format(DEFAULT_IGNORE)) - parser.add_argument('--select', metavar='errors', default='', - help='fix only these errors/warnings (e.g. 
E4,W)') - parser.add_argument('--max-line-length', metavar='n', default=79, type=int, - help='set maximum allowed line length ' - '(default: %(default)s)') - parser.add_argument('--line-range', '--range', metavar='line', - default=None, type=int, nargs=2, - help='only fix errors found within this inclusive ' - 'range of line numbers (e.g. 1 99); ' - 'line numbers are indexed at 1') - parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE, - type=int, help=argparse.SUPPRESS) - parser.add_argument('files', nargs='*', - help="files to format or '-' for standard in") - - return parser - - -def parse_args(arguments, apply_config=False): - """Parse command-line options.""" - parser = create_parser() - args = parser.parse_args(arguments) - - if not args.files and not args.list_fixes: - parser.error('incorrect number of arguments') - - args.files = [decode_filename(name) for name in args.files] - - if apply_config: - parser = read_config(args, parser) - args = parser.parse_args(arguments) - args.files = [decode_filename(name) for name in args.files] - - if '-' in args.files: - if len(args.files) > 1: - parser.error('cannot mix stdin and regular files') - - if args.diff: - parser.error('--diff cannot be used with standard input') - - if args.in_place: - parser.error('--in-place cannot be used with standard input') - - if args.recursive: - parser.error('--recursive cannot be used with standard input') - - if len(args.files) > 1 and not (args.in_place or args.diff): - parser.error('autopep8 only takes one filename as argument ' - 'unless the "--in-place" or "--diff" args are ' - 'used') - - if args.recursive and not (args.in_place or args.diff): - parser.error('--recursive must be used with --in-place or --diff') - - if args.in_place and args.diff: - parser.error('--in-place and --diff are mutually exclusive') - - if args.max_line_length <= 0: - parser.error('--max-line-length must be greater than 0') - - if args.select: - args.select = _split_comma_separated(args.select) - - if args.ignore: - args.ignore = _split_comma_separated(args.ignore) - elif not args.select: - if args.aggressive: - # Enable everything by default if aggressive. - args.select = set(['E', 'W']) - else: - args.ignore = _split_comma_separated(DEFAULT_IGNORE) - - if args.exclude: - args.exclude = _split_comma_separated(args.exclude) - else: - args.exclude = set([]) - - if args.jobs < 1: - # Do not import multiprocessing globally in case it is not supported - # on the platform. 
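For reference, the option surface defined by create_parser above can be exercised with a much smaller parser; this sketch keeps only a handful of the real flags, with their names unchanged:

import argparse

parser = argparse.ArgumentParser(prog='autopep8')
parser.add_argument('-d', '--diff', action='store_true')
parser.add_argument('-i', '--in-place', action='store_true')
# Repeated -a flags increase aggressiveness, as in the original.
parser.add_argument('-a', '--aggressive', action='count', default=0)
parser.add_argument('--max-line-length', type=int, default=79)
parser.add_argument('files', nargs='*')

print(parser.parse_args(['-aa', '--max-line-length', '100', 'x.py']))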
- import multiprocessing - args.jobs = multiprocessing.cpu_count() - - if args.jobs > 1 and not args.in_place: - parser.error('parallel jobs requires --in-place') - - if args.line_range: - if args.line_range[0] <= 0: - parser.error('--range must be positive numbers') - if args.line_range[0] > args.line_range[1]: - parser.error('First value of --range should be less than or equal ' - 'to the second') - - return args - - -def read_config(args, parser): - """Read both user configuration and local configuration.""" - try: - from configparser import ConfigParser as SafeConfigParser - from configparser import Error - except ImportError: - from ConfigParser import SafeConfigParser - from ConfigParser import Error - - config = SafeConfigParser() - - try: - config.read(args.global_config) - - if not args.ignore_local_config: - parent = tail = args.files and os.path.abspath( - os.path.commonprefix(args.files)) - while tail: - if config.read([os.path.join(parent, fn) - for fn in PROJECT_CONFIG]): - break - (parent, tail) = os.path.split(parent) - - defaults = dict() - option_list = dict([(o.dest, o.type or type(o.default)) - for o in parser._actions]) - - for section in ['pep8', 'pycodestyle']: - if not config.has_section(section): - continue - for k, v in config.items(section): - norm_opt = k.lstrip('-').replace('-', '_') - opt_type = option_list[norm_opt] - if opt_type is int: - value = config.getint(section, k) - elif opt_type is bool: - value = config.getboolean(section, k) - else: - value = config.get(section, k) - defaults[norm_opt] = value - - parser.set_defaults(**defaults) - except Error: - # Ignore for now. - pass - - return parser - - -def _split_comma_separated(string): - """Return a set of strings.""" - return set(text.strip() for text in string.split(',') if text.strip()) - - -def decode_filename(filename): - """Return Unicode filename.""" - if isinstance(filename, unicode): - return filename - else: - return filename.decode(sys.getfilesystemencoding()) - - -def supported_fixes(): - """Yield pep8 error codes that autopep8 fixes. - - Each item we yield is a tuple of the code followed by its - description. - - """ - yield ('E101', docstring_summary(reindent.__doc__)) - - instance = FixPEP8(filename=None, options=None, contents='') - for attribute in dir(instance): - code = re.match('fix_([ew][0-9][0-9][0-9])', attribute) - if code: - yield ( - code.group(1).upper(), - re.sub(r'\s+', ' ', - docstring_summary(getattr(instance, attribute).__doc__)) - ) - - for (code, function) in sorted(global_fixes()): - yield (code.upper() + (4 - len(code)) * ' ', - re.sub(r'\s+', ' ', docstring_summary(function.__doc__))) - - for code in sorted(CODE_TO_2TO3): - yield (code.upper() + (4 - len(code)) * ' ', - re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__))) - - -def docstring_summary(docstring): - """Return summary of docstring.""" - return docstring.split('\n')[0] if docstring else '' - - -def line_shortening_rank(candidate, indent_word, max_line_length, - experimental=False): - """Return rank of candidate. - - This is for sorting candidates. - - """ - if not candidate.strip(): - return 0 - - rank = 0 - lines = candidate.rstrip().split('\n') - - offset = 0 - if ( - not lines[0].lstrip().startswith('#') and - lines[0].rstrip()[-1] not in '([{' - ): - for (opening, closing) in ('()', '[]', '{}'): - # Don't penalize empty containers that aren't split up. Things like - # this "foo(\n )" aren't particularly good. 
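[Editor's note: read_config() above folds a [pep8] or [pycodestyle] section from the global config file, and from any PROJECT_CONFIG file found above the inputs' common prefix, into the parser defaults. A standard-library-only sketch of the key normalization it performs; the section text is invented for illustration, and read_config() additionally coerces each value through the option's declared type:

    import configparser

    cfg_text = "[pycodestyle]\nmax-line-length = 100\nignore = E226,E24\n"
    config = configparser.ConfigParser()
    config.read_string(cfg_text)

    # CLI-style keys become argparse dest names, exactly as in read_config().
    defaults = {key.lstrip("-").replace("-", "_"): value
                for key, value in config.items("pycodestyle")}
    assert defaults == {"max_line_length": "100", "ignore": "E226,E24"}
]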
- opening_loc = lines[0].find(opening) - closing_loc = lines[0].find(closing) - if opening_loc >= 0: - if closing_loc < 0 or closing_loc != opening_loc + 1: - offset = max(offset, 1 + opening_loc) - - current_longest = max(offset + len(x.strip()) for x in lines) - - rank += 4 * max(0, current_longest - max_line_length) - - rank += len(lines) - - # Too much variation in line length is ugly. - rank += 2 * standard_deviation(len(line) for line in lines) - - bad_staring_symbol = { - '(': ')', - '[': ']', - '{': '}'}.get(lines[0][-1]) - - if len(lines) > 1: - if ( - bad_staring_symbol and - lines[1].lstrip().startswith(bad_staring_symbol) - ): - rank += 20 - - for lineno, current_line in enumerate(lines): - current_line = current_line.strip() - - if current_line.startswith('#'): - continue - - for bad_start in ['.', '%', '+', '-', '/']: - if current_line.startswith(bad_start): - rank += 100 - - # Do not tolerate operators on their own line. - if current_line == bad_start: - rank += 1000 - - if ( - current_line.endswith(('.', '%', '+', '-', '/')) and - "': " in current_line - ): - rank += 1000 - - if current_line.endswith(('(', '[', '{', '.')): - # Avoid lonely opening. They result in longer lines. - if len(current_line) <= len(indent_word): - rank += 100 - - # Avoid the ugliness of ", (\n". - if ( - current_line.endswith('(') and - current_line[:-1].rstrip().endswith(',') - ): - rank += 100 - - # Avoid the ugliness of "something[\n" and something[index][\n. - if ( - current_line.endswith('[') and - len(current_line) > 1 and - (current_line[-2].isalnum() or current_line[-2] in ']') - ): - rank += 300 - - # Also avoid the ugliness of "foo.\nbar" - if current_line.endswith('.'): - rank += 100 - - if has_arithmetic_operator(current_line): - rank += 100 - - # Avoid breaking at unary operators. - if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')): - rank += 1000 - - if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')): - rank += 1000 - - if current_line.endswith(('%', '(', '[', '{')): - rank -= 20 - - # Try to break list comprehensions at the "for". - if current_line.startswith('for '): - rank -= 50 - - if current_line.endswith('\\'): - # If a line ends in \-newline, it may be part of a - # multiline string. In that case, we would like to know - # how long that line is without the \-newline. If it's - # longer than the maximum, or has comments, then we assume - # that the \-newline is an okay candidate and only - # penalize it a bit. - total_len = len(current_line) - lineno += 1 - while lineno < len(lines): - total_len += len(lines[lineno]) - - if lines[lineno].lstrip().startswith('#'): - total_len = max_line_length - break - - if not lines[lineno].endswith('\\'): - break - - lineno += 1 - - if total_len < max_line_length: - rank += 10 - else: - rank += 100 if experimental else 1 - - # Prefer breaking at commas rather than colon. - if ',' in current_line and current_line.endswith(':'): - rank += 10 - - # Avoid splitting dictionaries between key and value. 
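[Editor's note, for orientation mid-function: the rank accumulated here is only ever used as a comparator; callers generate several reflow candidates and keep the lowest-ranked one. A hypothetical probe, assuming the finished function is importable from a standalone autopep8:

    from autopep8 import line_shortening_rank

    candidates = [
        "x = compute(\n    alpha, beta, gamma)\n",
        "x = compute(alpha,\n            beta,\n            gamma)\n",
    ]
    for candidate in candidates:
        print(line_shortening_rank(candidate,
                                   indent_word="    ",
                                   max_line_length=79))
]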
- if current_line.endswith(':'): - rank += 100 - - rank += 10 * count_unbalanced_brackets(current_line) - - return max(0, rank) - - -def standard_deviation(numbers): - """Return standard deviation.""" - numbers = list(numbers) - if not numbers: - return 0 - mean = sum(numbers) / len(numbers) - return (sum((n - mean) ** 2 for n in numbers) / - len(numbers)) ** .5 - - -def has_arithmetic_operator(line): - """Return True if line contains any arithmetic operators.""" - for operator in pycodestyle.ARITHMETIC_OP: - if operator in line: - return True - - return False - - -def count_unbalanced_brackets(line): - """Return number of unmatched open/close brackets.""" - count = 0 - for opening, closing in ['()', '[]', '{}']: - count += abs(line.count(opening) - line.count(closing)) - - return count - - -def split_at_offsets(line, offsets): - """Split line at offsets. - - Return list of strings. - - """ - result = [] - - previous_offset = 0 - current_offset = 0 - for current_offset in sorted(offsets): - if current_offset < len(line) and previous_offset != current_offset: - result.append(line[previous_offset:current_offset].strip()) - previous_offset = current_offset - - result.append(line[current_offset:]) - - return result - - -class LineEndingWrapper(object): - - r"""Replace line endings to work with sys.stdout. - - It seems that sys.stdout expects only '\n' as the line ending, no matter - the platform. Otherwise, we get repeated line endings. - - """ - - def __init__(self, output): - self.__output = output - - def write(self, s): - self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n')) - - def flush(self): - self.__output.flush() - - -def match_file(filename, exclude): - """Return True if file is okay for modifying/recursing.""" - base_name = os.path.basename(filename) - - if base_name.startswith('.'): - return False - - for pattern in exclude: - if fnmatch.fnmatch(base_name, pattern): - return False - if fnmatch.fnmatch(filename, pattern): - return False - - if not os.path.isdir(filename) and not is_python_file(filename): - return False - - return True - - -def find_files(filenames, recursive, exclude): - """Yield filenames.""" - while filenames: - name = filenames.pop(0) - if recursive and os.path.isdir(name): - for root, directories, children in os.walk(name): - filenames += [os.path.join(root, f) for f in children - if match_file(os.path.join(root, f), - exclude)] - directories[:] = [d for d in directories - if match_file(os.path.join(root, d), - exclude)] - else: - yield name - - -def _fix_file(parameters): - """Helper function for optionally running fix_file() in parallel.""" - if parameters[1].verbose: - print('[file:{0}]'.format(parameters[0]), file=sys.stderr) - try: - fix_file(*parameters) - except IOError as error: - print(unicode(error), file=sys.stderr) - - -def fix_multiple_files(filenames, options, output=None): - """Fix list of files. - - Optionally fix files recursively.
- - """ - filenames = find_files(filenames, options.recursive, options.exclude) - if options.jobs > 1: - import multiprocessing - pool = multiprocessing.Pool(options.jobs) - pool.map(_fix_file, - [(name, options) for name in filenames]) - else: - for name in filenames: - _fix_file((name, options, output)) - - -def is_python_file(filename): - """Return True if filename is Python file.""" - if filename.endswith('.py'): - return True - - try: - with open_with_encoding( - filename, - limit_byte_check=MAX_PYTHON_FILE_DETECTION_BYTES) as f: - text = f.read(MAX_PYTHON_FILE_DETECTION_BYTES) - if not text: - return False - first_line = text.splitlines()[0] - except (IOError, IndexError): - return False - - if not PYTHON_SHEBANG_REGEX.match(first_line): - return False - - return True - - -def is_probably_part_of_multiline(line): - """Return True if line is likely part of a multiline string. - - When multiline strings are involved, pep8 reports the error as being - at the start of the multiline string, which doesn't work for us. - - """ - return ( - '"""' in line or - "'''" in line or - line.rstrip().endswith('\\') - ) - - -def wrap_output(output, encoding): - """Return output with specified encoding.""" - return codecs.getwriter(encoding)(output.buffer - if hasattr(output, 'buffer') - else output) - - -def get_encoding(): - """Return preferred encoding.""" - return locale.getpreferredencoding() or sys.getdefaultencoding() - - -def main(argv=None, apply_config=True): - """Command-line entry.""" - if argv is None: - argv = sys.argv - - try: - # Exit on broken pipe. - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - except AttributeError: # pragma: no cover - # SIGPIPE is not available on Windows. - pass - - try: - args = parse_args(argv[1:], apply_config=apply_config) - - if args.list_fixes: - for code, description in sorted(supported_fixes()): - print('{code} - {description}'.format( - code=code, description=description)) - return 0 - - if args.files == ['-']: - assert not args.in_place - - encoding = sys.stdin.encoding or get_encoding() - - # LineEndingWrapper is unnecessary here due to the symmetry between - # standard in and standard out. - wrap_output(sys.stdout, encoding=encoding).write( - fix_code(sys.stdin.read(), args, encoding=encoding)) - else: - if args.in_place or args.diff: - args.files = list(set(args.files)) - else: - assert len(args.files) == 1 - assert not args.recursive - - fix_multiple_files(args.files, args, sys.stdout) - except KeyboardInterrupt: - return 1 # pragma: no cover - - -class CachedTokenizer(object): - - """A one-element cache around tokenize.generate_tokens(). - - Original code written by Ned Batchelder, in coverage.py. 
- - """ - - def __init__(self): - self.last_text = None - self.last_tokens = None - - def generate_tokens(self, text): - """A stand-in for tokenize.generate_tokens().""" - if text != self.last_text: - string_io = io.StringIO(text) - self.last_tokens = list( - tokenize.generate_tokens(string_io.readline) - ) - self.last_text = text - return self.last_tokens - - -_cached_tokenizer = CachedTokenizer() -generate_tokens = _cached_tokenizer.generate_tokens - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/.gitignore b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/.gitignore deleted file mode 100644 index 1c45ce5b..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.pickle diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/Grammar.txt b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/Grammar.txt deleted file mode 100644 index 1e1f24cf..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/Grammar.txt +++ /dev/null @@ -1,158 +0,0 @@ -# Grammar for 2to3. This grammar supports Python 2.x and 3.x. - -# Note: Changing the grammar specified in this file will most likely -# require corresponding changes in the parser module -# (../Modules/parsermodule.c). If you can't make the changes to -# that module yourself, please co-ordinate the required changes -# with someone who can; ask around on python-dev for help. Fred -# Drake will probably be listening there. - -# NOTE WELL: You should also follow all the steps listed in PEP 306, -# "How to Change Python's Grammar" - -# Commands for Kees Blom's railroad program -#diagram:token NAME -#diagram:token NUMBER -#diagram:token STRING -#diagram:token NEWLINE -#diagram:token ENDMARKER -#diagram:token INDENT -#diagram:output\input python.bla -#diagram:token DEDENT -#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm -#diagram:rules - -# Start symbols for the grammar: -# file_input is a module or sequence of commands read from an input file; -# single_input is a single interactive statement; -# eval_input is the input for the eval() and input() functions. -# NB: compound_stmt in single_input is followed by extra NEWLINE! 
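[Editor's note: the grammar rules that follow drive pgen2; an identical copy shipped in the stdlib as lib2to3 through Python 3.12 (it was removed in 3.13), so the deleted file can still be exercised like this, version permitting:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)

    # This grammar deliberately still accepts 2.x-only constructs (print_stmt).
    tree = d.parse_string("print 'hello'\n")
    print(pytree.type_repr(tree.children[0].type))   # -> simple_stmt
]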
-file_input: (NEWLINE | stmt)* ENDMARKER -single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE -eval_input: testlist NEWLINE* ENDMARKER - -decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE -decorators: decorator+ -decorated: decorators (classdef | funcdef) -funcdef: 'def' NAME parameters ['->' test] ':' suite -parameters: '(' [typedargslist] ')' -typedargslist: ((tfpdef ['=' test] ',')* - ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) - | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) -tname: NAME [':' test] -tfpdef: tname | '(' tfplist ')' -tfplist: tfpdef (',' tfpdef)* [','] -varargslist: ((vfpdef ['=' test] ',')* - ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) - | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) -vname: NAME -vfpdef: vname | '(' vfplist ')' -vfplist: vfpdef (',' vfpdef)* [','] - -stmt: simple_stmt | compound_stmt -simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE -small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | - import_stmt | global_stmt | exec_stmt | assert_stmt) -expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | - ('=' (yield_expr|testlist_star_expr))*) -testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] -augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | - '<<=' | '>>=' | '**=' | '//=') -# For normal assignments, additional restrictions enforced by the interpreter -print_stmt: 'print' ( [ test (',' test)* [','] ] | - '>>' test [ (',' test)+ [','] ] ) -del_stmt: 'del' exprlist -pass_stmt: 'pass' -flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt -break_stmt: 'break' -continue_stmt: 'continue' -return_stmt: 'return' [testlist] -yield_stmt: yield_expr -raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] -import_stmt: import_name | import_from -import_name: 'import' dotted_as_names -import_from: ('from' ('.'* dotted_name | '.'+) - 'import' ('*' | '(' import_as_names ')' | import_as_names)) -import_as_name: NAME ['as' NAME] -dotted_as_name: dotted_name ['as' NAME] -import_as_names: import_as_name (',' import_as_name)* [','] -dotted_as_names: dotted_as_name (',' dotted_as_name)* -dotted_name: NAME ('.' 
NAME)* -global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* -exec_stmt: 'exec' expr ['in' test [',' test]] -assert_stmt: 'assert' test [',' test] - -compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated -if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] -while_stmt: 'while' test ':' suite ['else' ':' suite] -for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] -try_stmt: ('try' ':' suite - ((except_clause ':' suite)+ - ['else' ':' suite] - ['finally' ':' suite] | - 'finally' ':' suite)) -with_stmt: 'with' with_item (',' with_item)* ':' suite -with_item: test ['as' expr] -with_var: 'as' expr -# NB compile.c makes sure that the default except clause is last -except_clause: 'except' [test [(',' | 'as') test]] -suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT - -# Backward compatibility cruft to support: -# [ x for x in lambda: True, lambda: False if x() ] -# even while also allowing: -# lambda x: 5 if x else 2 -# (But not a mix of the two) -testlist_safe: old_test [(',' old_test)+ [',']] -old_test: or_test | old_lambdef -old_lambdef: 'lambda' [varargslist] ':' old_test - -test: or_test ['if' or_test 'else' test] | lambdef -or_test: and_test ('or' and_test)* -and_test: not_test ('and' not_test)* -not_test: 'not' not_test | comparison -comparison: expr (comp_op expr)* -comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' -star_expr: '*' expr -expr: xor_expr ('|' xor_expr)* -xor_expr: and_expr ('^' and_expr)* -and_expr: shift_expr ('&' shift_expr)* -shift_expr: arith_expr (('<<'|'>>') arith_expr)* -arith_expr: term (('+'|'-') term)* -term: factor (('*'|'/'|'%'|'//') factor)* -factor: ('+'|'-'|'~') factor | power -power: atom trailer* ['**' factor] -atom: ('(' [yield_expr|testlist_gexp] ')' | - '[' [listmaker] ']' | - '{' [dictsetmaker] '}' | - '`' testlist1 '`' | - NAME | NUMBER | STRING+ | '.' '.' '.') -listmaker: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) -testlist_gexp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) -lambdef: 'lambda' [varargslist] ':' test -trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME -subscriptlist: subscript (',' subscript)* [','] -subscript: test | [test] ':' [test] [sliceop] -sliceop: ':' [test] -exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] -testlist: test (',' test)* [','] -dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | - (test (comp_for | (',' test)* [','])) ) - -classdef: 'class' NAME ['(' [arglist] ')'] ':' suite - -arglist: (argument ',')* (argument [','] - |'*' test (',' argument)* [',' '**' test] - |'**' test) -argument: test [comp_for] | test '=' test # Really [keyword '='] test - -comp_iter: comp_for | comp_if -comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] -comp_if: 'if' old_test [comp_iter] - -testlist1: test (',' test)* - -# not used in grammar, but may appear in "node" passed from Parser to Compiler -encoding_decl: NAME - -yield_expr: 'yield' [testlist] diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/PatternGrammar.txt b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/PatternGrammar.txt deleted file mode 100644 index 36bf8148..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/PatternGrammar.txt +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# A grammar to describe tree matching patterns. 
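[Editor's note: this is the language that fixer PATTERN strings (deleted further below) are written in. A sketch of compiling and matching one by hand with the stdlib lib2to3 copy, subject to the same version caveat as above:

    from lib2to3 import pygram, pytree
    from lib2to3.patcomp import PatternCompiler
    from lib2to3.pgen2 import driver

    pattern = PatternCompiler().compile_pattern(
        "power< 'callable' trailer< '(' any ')' > >")

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("callable(f)\n")
    node = tree.children[0].children[0]   # the power node inside simple_stmt

    results = {}
    assert pattern.match(node, results)
]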
-# Not shown here: -# - 'TOKEN' stands for any token (leaf node) -# - 'any' stands for any node (leaf or interior) -# With 'any' we can still specify the sub-structure. - -# The start symbol is 'Matcher'. - -Matcher: Alternatives ENDMARKER - -Alternatives: Alternative ('|' Alternative)* - -Alternative: (Unit | NegatedUnit)+ - -Unit: [NAME '='] ( STRING [Repeater] - | NAME [Details] [Repeater] - | '(' Alternatives ')' [Repeater] - | '[' Alternatives ']' - ) - -NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')') - -Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}' - -Details: '<' Alternatives '>' diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__init__.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__init__.py deleted file mode 100644 index ea30561d..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__init__.py +++ /dev/null @@ -1 +0,0 @@ -#empty diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__main__.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__main__.py deleted file mode 100644 index 80688baf..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/__main__.py +++ /dev/null @@ -1,4 +0,0 @@ -import sys -from .main import main - -sys.exit(main("lib2to3.fixes")) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_matcher.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_matcher.py deleted file mode 100644 index 736ba2b9..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_matcher.py +++ /dev/null @@ -1,168 +0,0 @@ -"""A bottom-up tree matching algorithm implementation meant to speed -up 2to3's matching process. After the tree patterns are reduced to -their rarest linear path, a linear Aho-Corasick automaton is -created. The linear automaton traverses the linear paths from the -leaves to the root of the AST and returns a set of nodes for further -matching. This reduces significantly the number of candidate nodes.""" - -__author__ = "George Boutsioukis " - -import logging -import itertools -from collections import defaultdict - -from . import pytree -from .btm_utils import reduce_tree - -class BMNode(object): - """Class for a node of the Aho-Corasick automaton used in matching""" - count = itertools.count() - def __init__(self): - self.transition_table = {} - self.fixers = [] - self.id = next(BMNode.count) - self.content = '' - -class BottomMatcher(object): - """The main matcher class. After instantiating the patterns should - be added using the add_fixer method""" - - def __init__(self): - self.match = set() - self.root = BMNode() - self.nodes = [self.root] - self.fixers = [] - self.logger = logging.getLogger("RefactoringTool") - - def add_fixer(self, fixer): - """Reduces a fixer's pattern tree to a linear path and adds it - to the matcher(a common Aho-Corasick automaton). 
The fixer is - appended on the matching states and called when they are - reached""" - self.fixers.append(fixer) - tree = reduce_tree(fixer.pattern_tree) - linear = tree.get_linear_subpattern() - match_nodes = self.add(linear, start=self.root) - for match_node in match_nodes: - match_node.fixers.append(fixer) - - def add(self, pattern, start): - "Recursively adds a linear pattern to the AC automaton" - #print("adding pattern", pattern, "to", start) - if not pattern: - #print("empty pattern") - return [start] - if isinstance(pattern[0], tuple): - #alternatives - #print("alternatives") - match_nodes = [] - for alternative in pattern[0]: - #add all alternatives, and add the rest of the pattern - #to each end node - end_nodes = self.add(alternative, start=start) - for end in end_nodes: - match_nodes.extend(self.add(pattern[1:], end)) - return match_nodes - else: - #single token - #not last - if pattern[0] not in start.transition_table: - #transition did not exist, create new - next_node = BMNode() - start.transition_table[pattern[0]] = next_node - else: - #transition exists already, follow - next_node = start.transition_table[pattern[0]] - - if pattern[1:]: - end_nodes = self.add(pattern[1:], start=next_node) - else: - end_nodes = [next_node] - return end_nodes - - def run(self, leaves): - """The main interface with the bottom matcher. The tree is - traversed from the bottom using the constructed - automaton. Nodes are only checked once as the tree is - retraversed. When the automaton fails, we give it one more - shot(in case the above tree matches as a whole with the - rejected leaf), then we break for the next leaf. There is the - special case of multiple arguments(see code comments) where we - recheck the nodes - - Args: - The leaves of the AST tree to be matched - - Returns: - A dictionary of node matches with fixers as the keys - """ - current_ac_node = self.root - results = defaultdict(list) - for leaf in leaves: - current_ast_node = leaf - while current_ast_node: - current_ast_node.was_checked = True - for child in current_ast_node.children: - # multiple statements, recheck - if isinstance(child, pytree.Leaf) and child.value == u";": - current_ast_node.was_checked = False - break - if current_ast_node.type == 1: - #name - node_token = current_ast_node.value - else: - node_token = current_ast_node.type - - if node_token in current_ac_node.transition_table: - #token matches - current_ac_node = current_ac_node.transition_table[node_token] - for fixer in current_ac_node.fixers: - if not fixer in results: - results[fixer] = [] - results[fixer].append(current_ast_node) - - else: - #matching failed, reset automaton - current_ac_node = self.root - if (current_ast_node.parent is not None - and current_ast_node.parent.was_checked): - #the rest of the tree upwards has been checked, next leaf - break - - #recheck the rejected node once from the root - if node_token in current_ac_node.transition_table: - #token matches - current_ac_node = current_ac_node.transition_table[node_token] - for fixer in current_ac_node.fixers: - if not fixer in results.keys(): - results[fixer] = [] - results[fixer].append(current_ast_node) - - current_ast_node = current_ast_node.parent - return results - - def print_ac(self): - "Prints a graphviz diagram of the BM automaton(for debugging)" - print("digraph g{") - def print_node(node): - for subnode_key in node.transition_table.keys(): - subnode = node.transition_table[subnode_key] - print("%d -> %d [label=%s] //%s" % - (node.id, subnode.id, type_repr(subnode_key), 
str(subnode.fixers))) - if subnode_key == 1: - print(subnode.content) - print_node(subnode) - print_node(self.root) - print("}") - -# taken from pytree.py for debugging; only used by print_ac -_type_reprs = {} -def type_repr(type_num): - global _type_reprs - if not _type_reprs: - from .pygram import python_symbols - # printing tokens is possible but not as useful - # from .pgen2 import token // token.__dict__.items(): - for name, val in python_symbols.__dict__.items(): - if type(val) == int: _type_reprs[val] = name - return _type_reprs.setdefault(type_num, type_num) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_utils.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_utils.py deleted file mode 100644 index 2276dc9e..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/btm_utils.py +++ /dev/null @@ -1,283 +0,0 @@ -"Utility functions used by the btm_matcher module" - -from . import pytree -from .pgen2 import grammar, token -from .pygram import pattern_symbols, python_symbols - -syms = pattern_symbols -pysyms = python_symbols -tokens = grammar.opmap -token_labels = token - -TYPE_ANY = -1 -TYPE_ALTERNATIVES = -2 -TYPE_GROUP = -3 - -class MinNode(object): - """This class serves as an intermediate representation of the - pattern tree during the conversion to sets of leaf-to-root - subpatterns""" - - def __init__(self, type=None, name=None): - self.type = type - self.name = name - self.children = [] - self.leaf = False - self.parent = None - self.alternatives = [] - self.group = [] - - def __repr__(self): - return str(self.type) + ' ' + str(self.name) - - def leaf_to_root(self): - """Internal method. Returns a characteristic path of the - pattern tree. This method must be run for all leaves until the - linear subpatterns are merged into a single""" - node = self - subp = [] - while node: - if node.type == TYPE_ALTERNATIVES: - node.alternatives.append(subp) - if len(node.alternatives) == len(node.children): - #last alternative - subp = [tuple(node.alternatives)] - node.alternatives = [] - node = node.parent - continue - else: - node = node.parent - subp = None - break - - if node.type == TYPE_GROUP: - node.group.append(subp) - #probably should check the number of leaves - if len(node.group) == len(node.children): - subp = get_characteristic_subpattern(node.group) - node.group = [] - node = node.parent - continue - else: - node = node.parent - subp = None - break - - if node.type == token_labels.NAME and node.name: - #in case of type=name, use the name instead - subp.append(node.name) - else: - subp.append(node.type) - - node = node.parent - return subp - - def get_linear_subpattern(self): - """Drives the leaf_to_root method. The reason that - leaf_to_root must be run multiple times is because we need to - reject 'group' matches; for example the alternative form - (a | b c) creates a group [b c] that needs to be matched. Since - matching multiple linear patterns overcomes the automaton's - capabilities, leaf_to_root merges each group into a single - choice based on 'characteristic'ity, - - i.e. (a|b c) -> (a|b) if b more characteristic than c - - Returns: The most 'characteristic'(as defined by - get_characteristic_subpattern) path for the compiled pattern - tree. 
- """ - - for l in self.leaves(): - subp = l.leaf_to_root() - if subp: - return subp - - def leaves(self): - "Generator that returns the leaves of the tree" - for child in self.children: - for x in child.leaves(): - yield x - if not self.children: - yield self - -def reduce_tree(node, parent=None): - """ - Internal function. Reduces a compiled pattern tree to an - intermediate representation suitable for feeding the - automaton. This also trims off any optional pattern elements(like - [a], a*). - """ - - new_node = None - #switch on the node type - if node.type == syms.Matcher: - #skip - node = node.children[0] - - if node.type == syms.Alternatives : - #2 cases - if len(node.children) <= 2: - #just a single 'Alternative', skip this node - new_node = reduce_tree(node.children[0], parent) - else: - #real alternatives - new_node = MinNode(type=TYPE_ALTERNATIVES) - #skip odd children('|' tokens) - for child in node.children: - if node.children.index(child)%2: - continue - reduced = reduce_tree(child, new_node) - if reduced is not None: - new_node.children.append(reduced) - elif node.type == syms.Alternative: - if len(node.children) > 1: - - new_node = MinNode(type=TYPE_GROUP) - for child in node.children: - reduced = reduce_tree(child, new_node) - if reduced: - new_node.children.append(reduced) - if not new_node.children: - # delete the group if all of the children were reduced to None - new_node = None - - else: - new_node = reduce_tree(node.children[0], parent) - - elif node.type == syms.Unit: - if (isinstance(node.children[0], pytree.Leaf) and - node.children[0].value == '('): - #skip parentheses - return reduce_tree(node.children[1], parent) - if ((isinstance(node.children[0], pytree.Leaf) and - node.children[0].value == '[') - or - (len(node.children)>1 and - hasattr(node.children[1], "value") and - node.children[1].value == '[')): - #skip whole unit if its optional - return None - - leaf = True - details_node = None - alternatives_node = None - has_repeater = False - repeater_node = None - has_variable_name = False - - for child in node.children: - if child.type == syms.Details: - leaf = False - details_node = child - elif child.type == syms.Repeater: - has_repeater = True - repeater_node = child - elif child.type == syms.Alternatives: - alternatives_node = child - if hasattr(child, 'value') and child.value == '=': # variable name - has_variable_name = True - - #skip variable name - if has_variable_name: - #skip variable name, '=' - name_leaf = node.children[2] - if hasattr(name_leaf, 'value') and name_leaf.value == '(': - # skip parenthesis - name_leaf = node.children[3] - else: - name_leaf = node.children[0] - - #set node type - if name_leaf.type == token_labels.NAME: - #(python) non-name or wildcard - if name_leaf.value == 'any': - new_node = MinNode(type=TYPE_ANY) - else: - if hasattr(token_labels, name_leaf.value): - new_node = MinNode(type=getattr(token_labels, name_leaf.value)) - else: - new_node = MinNode(type=getattr(pysyms, name_leaf.value)) - - elif name_leaf.type == token_labels.STRING: - #(python) name or character; remove the apostrophes from - #the string value - name = name_leaf.value.strip("'") - if name in tokens: - new_node = MinNode(type=tokens[name]) - else: - new_node = MinNode(type=token_labels.NAME, name=name) - elif name_leaf.type == syms.Alternatives: - new_node = reduce_tree(alternatives_node, parent) - - #handle repeaters - if has_repeater: - if repeater_node.children[0].value == '*': - #reduce to None - new_node = None - elif repeater_node.children[0].value 
== '+': - #reduce to a single occurence i.e. do nothing - pass - else: - #TODO: handle {min, max} repeaters - raise NotImplementedError - pass - - #add children - if details_node and new_node is not None: - for child in details_node.children[1:-1]: - #skip '<', '>' markers - reduced = reduce_tree(child, new_node) - if reduced is not None: - new_node.children.append(reduced) - if new_node: - new_node.parent = parent - return new_node - - -def get_characteristic_subpattern(subpatterns): - """Picks the most characteristic from a list of linear patterns - Current order used is: - names > common_names > common_chars - """ - if not isinstance(subpatterns, list): - return subpatterns - if len(subpatterns)==1: - return subpatterns[0] - - # first pick out the ones containing variable names - subpatterns_with_names = [] - subpatterns_with_common_names = [] - common_names = ['in', 'for', 'if' , 'not', 'None'] - subpatterns_with_common_chars = [] - common_chars = "[]().,:" - for subpattern in subpatterns: - if any(rec_test(subpattern, lambda x: type(x) is str)): - if any(rec_test(subpattern, - lambda x: isinstance(x, str) and x in common_chars)): - subpatterns_with_common_chars.append(subpattern) - elif any(rec_test(subpattern, - lambda x: isinstance(x, str) and x in common_names)): - subpatterns_with_common_names.append(subpattern) - - else: - subpatterns_with_names.append(subpattern) - - if subpatterns_with_names: - subpatterns = subpatterns_with_names - elif subpatterns_with_common_names: - subpatterns = subpatterns_with_common_names - elif subpatterns_with_common_chars: - subpatterns = subpatterns_with_common_chars - # of the remaining subpatterns pick out the longest one - return max(subpatterns, key=len) - -def rec_test(sequence, test_func): - """Tests test_func on all items of sequence and items of included - sub-iterables""" - for x in sequence: - if isinstance(x, (list, tuple)): - for y in rec_test(x, test_func): - yield y - else: - yield test_func(x) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_base.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_base.py deleted file mode 100644 index f6421ba3..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_base.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Base class for fixers (optional, but recommended).""" - -# Python imports -import logging -import itertools - -# Local imports -from .patcomp import PatternCompiler -from . import pygram -from .fixer_util import does_tree_import - -class BaseFix(object): - - """Optional base class for fixers. - - The subclass name must be FixFooBar where FooBar is the result of - removing underscores and capitalizing the words of the fix name. - For example, the class name for a fixer named 'has_key' should be - FixHasKey. - """ - - PATTERN = None # Most subclasses should override with a string literal - pattern = None # Compiled pattern, set by compile_pattern() - pattern_tree = None # Tree representation of the pattern - options = None # Options object passed to initializer - filename = None # The filename (set by set_filename) - logger = None # A logger (set by set_filename) - numbers = itertools.count(1) # For new_name() - used_names = set() # A set of all used NAMEs - order = "post" # Does the fixer prefer pre- or post-order traversal - explicit = False # Is this ignored by refactor.py -f all? 
- run_order = 5 # Fixers will be sorted by run order before execution - # Lower numbers will be run first. - _accept_type = None # [Advanced and not public] This tells RefactoringTool - # which node type to accept when there's not a pattern. - - keep_line_order = False # For the bottom matcher: match with the - # original line order - BM_compatible = False # Compatibility with the bottom matching - # module; every fixer should set this - # manually - - # Shortcut for access to Python grammar symbols - syms = pygram.python_symbols - - def __init__(self, options, log): - """Initializer. Subclass may override. - - Args: - options: an dict containing the options passed to RefactoringTool - that could be used to customize the fixer through the command line. - log: a list to append warnings and other messages to. - """ - self.options = options - self.log = log - self.compile_pattern() - - def compile_pattern(self): - """Compiles self.PATTERN into self.pattern. - - Subclass may override if it doesn't want to use - self.{pattern,PATTERN} in .match(). - """ - if self.PATTERN is not None: - PC = PatternCompiler() - self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, - with_tree=True) - - def set_filename(self, filename): - """Set the filename, and a logger derived from it. - - The main refactoring tool should call this. - """ - self.filename = filename - self.logger = logging.getLogger(filename) - - def match(self, node): - """Returns match for a given parse tree node. - - Should return a true or false object (not necessarily a bool). - It may return a non-empty dict of matching sub-nodes as - returned by a matching pattern. - - Subclass may override. - """ - results = {"node": node} - return self.pattern.match(node, results) and results - - def transform(self, node, results): - """Returns the transformation for a given parse tree node. - - Args: - node: the root of the parse tree that matched the fixer. - results: a dict mapping symbolic names to part of the match. - - Returns: - None, or a node that is a modified copy of the - argument node. The node argument may also be modified in-place to - effect the same change. - - Subclass *must* override. - """ - raise NotImplementedError() - - def new_name(self, template=u"xxx_todo_changeme"): - """Return a string suitable for use as an identifier - - The new name is guaranteed not to conflict with other identifiers. - """ - name = template - while name in self.used_names: - name = template + unicode(self.numbers.next()) - self.used_names.add(name) - return name - - def log_message(self, message): - if self.first_log: - self.first_log = False - self.log.append("### In file %s ###" % self.filename) - self.log.append(message) - - def cannot_convert(self, node, reason=None): - """Warn the user that a given chunk of code is not valid Python 3, - but that it cannot be converted automatically. - - First argument is the top-level node for the code in question. - Optional second argument is why it can't be converted. - """ - lineno = node.get_lineno() - for_output = node.clone() - for_output.prefix = u"" - msg = "Line %d: could not convert: %s" - self.log_message(msg % (lineno, for_output)) - if reason: - self.log_message(reason) - - def warning(self, node, reason): - """Used for warning the user about possible uncertainty in the - translation. - - First argument is the top-level node for the code in question. - Optional second argument is why it can't be converted. 
- """ - lineno = node.get_lineno() - self.log_message("Line %d: %s" % (lineno, reason)) - - def start_tree(self, tree, filename): - """Some fixers need to maintain tree-wide state. - This method is called once, at the start of tree fix-up. - - tree - the root node of the tree to be processed. - filename - the name of the file the tree came from. - """ - self.used_names = tree.used_names - self.set_filename(filename) - self.numbers = itertools.count(1) - self.first_log = True - - def finish_tree(self, tree, filename): - """Some fixers need to maintain tree-wide state. - This method is called once, at the conclusion of tree fix-up. - - tree - the root node of the tree to be processed. - filename - the name of the file the tree came from. - """ - pass - - -class ConditionalFix(BaseFix): - """ Base class for fixers which not execute if an import is found. """ - - # This is the name of the import which, if found, will cause the test to be skipped - skip_on = None - - def start_tree(self, *args): - super(ConditionalFix, self).start_tree(*args) - self._should_skip = None - - def should_skip(self, node): - if self._should_skip is not None: - return self._should_skip - pkg = self.skip_on.split(".") - name = pkg[-1] - pkg = ".".join(pkg[:-1]) - self._should_skip = does_tree_import(pkg, name, node) - return self._should_skip diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_util.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_util.py deleted file mode 100644 index 78fdf26d..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixer_util.py +++ /dev/null @@ -1,432 +0,0 @@ -"""Utility functions, node construction macros, etc.""" -# Author: Collin Winter - -from itertools import islice - -# Local imports -from .pgen2 import token -from .pytree import Leaf, Node -from .pygram import python_symbols as syms -from . import patcomp - - -########################################################### -### Common node-construction "macros" -########################################################### - -def KeywordArg(keyword, value): - return Node(syms.argument, - [keyword, Leaf(token.EQUAL, u"="), value]) - -def LParen(): - return Leaf(token.LPAR, u"(") - -def RParen(): - return Leaf(token.RPAR, u")") - -def Assign(target, source): - """Build an assignment statement""" - if not isinstance(target, list): - target = [target] - if not isinstance(source, list): - source.prefix = u" " - source = [source] - - return Node(syms.atom, - target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source) - -def Name(name, prefix=None): - """Return a NAME leaf""" - return Leaf(token.NAME, name, prefix=prefix) - -def Attr(obj, attr): - """A node tuple for obj.attr""" - return [obj, Node(syms.trailer, [Dot(), attr])] - -def Comma(): - """A comma leaf""" - return Leaf(token.COMMA, u",") - -def Dot(): - """A period (.) 
leaf""" - return Leaf(token.DOT, u".") - -def ArgList(args, lparen=LParen(), rparen=RParen()): - """A parenthesised argument list, used by Call()""" - node = Node(syms.trailer, [lparen.clone(), rparen.clone()]) - if args: - node.insert_child(1, Node(syms.arglist, args)) - return node - -def Call(func_name, args=None, prefix=None): - """A function call""" - node = Node(syms.power, [func_name, ArgList(args)]) - if prefix is not None: - node.prefix = prefix - return node - -def Newline(): - """A newline literal""" - return Leaf(token.NEWLINE, u"\n") - -def BlankLine(): - """A blank line""" - return Leaf(token.NEWLINE, u"") - -def Number(n, prefix=None): - return Leaf(token.NUMBER, n, prefix=prefix) - -def Subscript(index_node): - """A numeric or string subscript""" - return Node(syms.trailer, [Leaf(token.LBRACE, u"["), - index_node, - Leaf(token.RBRACE, u"]")]) - -def String(string, prefix=None): - """A string leaf""" - return Leaf(token.STRING, string, prefix=prefix) - -def ListComp(xp, fp, it, test=None): - """A list comprehension of the form [xp for fp in it if test]. - - If test is None, the "if test" part is omitted. - """ - xp.prefix = u"" - fp.prefix = u" " - it.prefix = u" " - for_leaf = Leaf(token.NAME, u"for") - for_leaf.prefix = u" " - in_leaf = Leaf(token.NAME, u"in") - in_leaf.prefix = u" " - inner_args = [for_leaf, fp, in_leaf, it] - if test: - test.prefix = u" " - if_leaf = Leaf(token.NAME, u"if") - if_leaf.prefix = u" " - inner_args.append(Node(syms.comp_if, [if_leaf, test])) - inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) - return Node(syms.atom, - [Leaf(token.LBRACE, u"["), - inner, - Leaf(token.RBRACE, u"]")]) - -def FromImport(package_name, name_leafs): - """ Return an import statement in the form: - from package import name_leafs""" - # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') - #assert package_name == '.' or '.' not in package_name, "FromImport has "\ - # "not been tested with dotted package names -- use at your own "\ - # "peril!" 
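[Editor's note: the node "macros" above compose directly; fix_callable (deleted later in this diff) assembles its replacement exactly this way. A short sketch with invented leaf names:

    from lib2to3.fixer_util import Attr, Call, Name, String

    args = [Name("obj"), String(", ")]   # the STRING leaf stands in for ", "
    args.extend(Attr(Name("collections"), Name("Callable")))
    call = Call(Name("isinstance"), args)
    print(str(call))   # -> isinstance(obj, collections.Callable)
]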
- - for leaf in name_leafs: - # Pull the leaves out of their old tree - leaf.remove() - - children = [Leaf(token.NAME, u"from"), - Leaf(token.NAME, package_name, prefix=u" "), - Leaf(token.NAME, u"import", prefix=u" "), - Node(syms.import_as_names, name_leafs)] - imp = Node(syms.import_from, children) - return imp - - -########################################################### -### Determine whether a node represents a given literal -########################################################### - -def is_tuple(node): - """Does the node represent a tuple literal?""" - if isinstance(node, Node) and node.children == [LParen(), RParen()]: - return True - return (isinstance(node, Node) - and len(node.children) == 3 - and isinstance(node.children[0], Leaf) - and isinstance(node.children[1], Node) - and isinstance(node.children[2], Leaf) - and node.children[0].value == u"(" - and node.children[2].value == u")") - -def is_list(node): - """Does the node represent a list literal?""" - return (isinstance(node, Node) - and len(node.children) > 1 - and isinstance(node.children[0], Leaf) - and isinstance(node.children[-1], Leaf) - and node.children[0].value == u"[" - and node.children[-1].value == u"]") - - -########################################################### -### Misc -########################################################### - -def parenthesize(node): - return Node(syms.atom, [LParen(), node, RParen()]) - - -consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum", - "min", "max", "enumerate"]) - -def attr_chain(obj, attr): - """Follow an attribute chain. - - If you have a chain of objects where a.foo -> b, b.foo-> c, etc, - use this to iterate over all objects in the chain. Iteration is - terminated by getattr(x, attr) is None. - - Args: - obj: the starting object - attr: the name of the chaining attribute - - Yields: - Each successive object in the chain. - """ - next = getattr(obj, attr) - while next: - yield next - next = getattr(next, attr) - -p0 = """for_stmt< 'for' any 'in' node=any ':' any* > - | comp_for< 'for' any 'in' node=any any* > - """ -p1 = """ -power< - ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | - 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) - trailer< '(' node=any ')' > - any* -> -""" -p2 = """ -power< - ( 'sorted' | 'enumerate' ) - trailer< '(' arglist ')' > - any* -> -""" -pats_built = False -def in_special_context(node): - """ Returns true if node is in an environment where all that is required - of it is being iterable (ie, it doesn't matter if it returns a list - or an iterator). - See test_map_nochange in test_fixers.py for some examples and tests. - """ - global p0, p1, p2, pats_built - if not pats_built: - p0 = patcomp.compile_pattern(p0) - p1 = patcomp.compile_pattern(p1) - p2 = patcomp.compile_pattern(p2) - pats_built = True - patterns = [p0, p1, p2] - for pattern, parent in zip(patterns, attr_chain(node, "parent")): - results = {} - if pattern.match(parent, results) and results["node"] is node: - return True - return False - -def is_probably_builtin(node): - """ - Check that something isn't an attribute or function name etc. - """ - prev = node.prev_sibling - if prev is not None and prev.type == token.DOT: - # Attribute lookup. - return False - parent = node.parent - if parent.type in (syms.funcdef, syms.classdef): - return False - if parent.type == syms.expr_stmt and parent.children[0] is node: - # Assignment. 
- return False - if parent.type == syms.parameters or \ - (parent.type == syms.typedargslist and ( - (prev is not None and prev.type == token.COMMA) or - parent.children[0] is node - )): - # The name of an argument. - return False - return True - -def find_indentation(node): - """Find the indentation of *node*.""" - while node is not None: - if node.type == syms.suite and len(node.children) > 2: - indent = node.children[1] - if indent.type == token.INDENT: - return indent.value - node = node.parent - return u"" - -########################################################### -### The following functions are to find bindings in a suite -########################################################### - -def make_suite(node): - if node.type == syms.suite: - return node - node = node.clone() - parent, node.parent = node.parent, None - suite = Node(syms.suite, [node]) - suite.parent = parent - return suite - -def find_root(node): - """Find the top level namespace.""" - # Scamper up to the top level namespace - while node.type != syms.file_input: - node = node.parent - if not node: - raise ValueError("root found before file_input node was found.") - return node - -def does_tree_import(package, name, node): - """ Returns true if name is imported from package at the - top level of the tree which node belongs to. - To cover the case of an import like 'import foo', use - None for the package and 'foo' for the name. """ - binding = find_binding(name, find_root(node), package) - return bool(binding) - -def is_import(node): - """Returns true if the node is an import statement.""" - return node.type in (syms.import_name, syms.import_from) - -def touch_import(package, name, node): - """ Works like `does_tree_import` but adds an import statement - if it was not imported. """ - def is_import_stmt(node): - return (node.type == syms.simple_stmt and node.children and - is_import(node.children[0])) - - root = find_root(node) - - if does_tree_import(package, name, root): - return - - # figure out where to insert the new import. First try to find - # the first import and then skip to the last one. - insert_pos = offset = 0 - for idx, node in enumerate(root.children): - if not is_import_stmt(node): - continue - for offset, node2 in enumerate(root.children[idx:]): - if not is_import_stmt(node2): - break - insert_pos = idx + offset - break - - # if there are no imports where we can insert, find the docstring. - # if that also fails, we stick to the beginning of the file - if insert_pos == 0: - for idx, node in enumerate(root.children): - if (node.type == syms.simple_stmt and node.children and - node.children[0].type == token.STRING): - insert_pos = idx + 1 - break - - if package is None: - import_ = Node(syms.import_name, [ - Leaf(token.NAME, u"import"), - Leaf(token.NAME, name, prefix=u" ") - ]) - else: - import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u" ")]) - - children = [import_, Newline()] - root.insert_child(insert_pos, Node(syms.simple_stmt, children)) - - -_def_syms = set([syms.classdef, syms.funcdef]) -def find_binding(name, node, package=None): - """ Returns the node which binds variable name, otherwise None. - If optional argument package is supplied, only imports will - be returned. 
- See test cases for examples.""" - for child in node.children: - ret = None - if child.type == syms.for_stmt: - if _find(name, child.children[1]): - return child - n = find_binding(name, make_suite(child.children[-1]), package) - if n: ret = n - elif child.type in (syms.if_stmt, syms.while_stmt): - n = find_binding(name, make_suite(child.children[-1]), package) - if n: ret = n - elif child.type == syms.try_stmt: - n = find_binding(name, make_suite(child.children[2]), package) - if n: - ret = n - else: - for i, kid in enumerate(child.children[3:]): - if kid.type == token.COLON and kid.value == ":": - # i+3 is the colon, i+4 is the suite - n = find_binding(name, make_suite(child.children[i+4]), package) - if n: ret = n - elif child.type in _def_syms and child.children[1].value == name: - ret = child - elif _is_import_binding(child, name, package): - ret = child - elif child.type == syms.simple_stmt: - ret = find_binding(name, child, package) - elif child.type == syms.expr_stmt: - if _find(name, child.children[0]): - ret = child - - if ret: - if not package: - return ret - if is_import(ret): - return ret - return None - -_block_syms = set([syms.funcdef, syms.classdef, syms.trailer]) -def _find(name, node): - nodes = [node] - while nodes: - node = nodes.pop() - if node.type > 256 and node.type not in _block_syms: - nodes.extend(node.children) - elif node.type == token.NAME and node.value == name: - return node - return None - -def _is_import_binding(node, name, package=None): - """ Will reuturn node if node will import name, or node - will import * from package. None is returned otherwise. - See test cases for examples. """ - - if node.type == syms.import_name and not package: - imp = node.children[1] - if imp.type == syms.dotted_as_names: - for child in imp.children: - if child.type == syms.dotted_as_name: - if child.children[2].value == name: - return node - elif child.type == token.NAME and child.value == name: - return node - elif imp.type == syms.dotted_as_name: - last = imp.children[-1] - if last.type == token.NAME and last.value == name: - return node - elif imp.type == token.NAME and imp.value == name: - return node - elif node.type == syms.import_from: - # unicode(...) is used to make life easier here, because - # from a.b import parses to ['import', ['a', '.', 'b'], ...] - if package and unicode(node.children[1]).strip() != package: - return None - n = node.children[3] - if package and _find(u"as", n): - # See test_from_import_as for explanation - return None - elif n.type == syms.import_as_names and _find(name, n): - return node - elif n.type == syms.import_as_name: - child = n.children[2] - if child.type == token.NAME and child.value == name: - return node - elif n.type == token.NAME and n.value == name: - return node - elif package and n.type == token.STAR: - return node - return None diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/__init__.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/__init__.py deleted file mode 100644 index b93054b3..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Dummy file to make this directory a package. 
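[Editor's note: before the individual fixers are deleted below, a sketch of what the machinery above supports. A rename fixer needs only a PATTERN and a transform(); the names here are hypothetical, and a real custom fixer must live in an importable fix_* module so RefactoringTool can load it by dotted name:

    from lib2to3 import fixer_base
    from lib2to3.fixer_util import Name
    from lib2to3.refactor import RefactoringTool

    class FixOldName(fixer_base.BaseFix):   # would live in myfixes/fix_old_name.py
        BM_compatible = True
        PATTERN = "'old_name'"              # match the bare NAME old_name

        def transform(self, node, results):
            # Keep node.prefix so surrounding whitespace and comments survive.
            return Name("new_name", prefix=node.prefix)

    # Stock fixers run the same way 2to3 drives them:
    rt = RefactoringTool(["lib2to3.fixes.fix_basestring"])
    print(rt.refactor_string("isinstance(x, basestring)\n", "<example>"))
]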
diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_apply.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_apply.py deleted file mode 100644 index a7dc3a04..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_apply.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for apply(). - -This converts apply(func, v, k) into (func)(*v, **k).""" - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Call, Comma, parenthesize - -class FixApply(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - power< 'apply' - trailer< - '(' - arglist< - (not argument - ')' - > - > - """ - - def transform(self, node, results): - syms = self.syms - assert results - func = results["func"] - args = results["args"] - kwds = results.get("kwds") - prefix = node.prefix - func = func.clone() - if (func.type not in (token.NAME, syms.atom) and - (func.type != syms.power or - func.children[-2].type == token.DOUBLESTAR)): - # Need to parenthesize - func = parenthesize(func) - func.prefix = "" - args = args.clone() - args.prefix = "" - if kwds is not None: - kwds = kwds.clone() - kwds.prefix = "" - l_newargs = [pytree.Leaf(token.STAR, u"*"), args] - if kwds is not None: - l_newargs.extend([Comma(), - pytree.Leaf(token.DOUBLESTAR, u"**"), - kwds]) - l_newargs[-2].prefix = u" " # that's the ** token - # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t) - # can be translated into f(x, y, *t) instead of f(*(x, y) + t) - #new = pytree.Node(syms.power, (func, ArgList(l_newargs))) - return Call(func, l_newargs, prefix=prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_basestring.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_basestring.py deleted file mode 100644 index a3c9a436..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_basestring.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Fixer for basestring -> str.""" -# Author: Christian Heimes - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - -class FixBasestring(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = "'basestring'" - - def transform(self, node, results): - return Name(u"str", prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_buffer.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_buffer.py deleted file mode 100644 index c6b09280..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_buffer.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that changes buffer(...) into memoryview(...).""" - -# Local imports -from .. 
import fixer_base -from ..fixer_util import Name - - -class FixBuffer(fixer_base.BaseFix): - BM_compatible = True - - explicit = True # The user must ask for this fixer - - PATTERN = """ - power< name='buffer' trailer< '(' [any] ')' > any* > - """ - - def transform(self, node, results): - name = results["name"] - name.replace(Name(u"memoryview", prefix=name.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_callable.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_callable.py deleted file mode 100644 index df33d614..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_callable.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for callable(). - -This converts callable(obj) into isinstance(obj, collections.Callable), adding a -collections import if needed.""" - -# Local imports -from lib2to3 import fixer_base -from lib2to3.fixer_util import Call, Name, String, Attr, touch_import - -class FixCallable(fixer_base.BaseFix): - BM_compatible = True - - order = "pre" - - # Ignore callable(*args) or use of keywords. - # Either could be a hint that the builtin callable() is not being used. - PATTERN = """ - power< 'callable' - trailer< lpar='(' - ( not(arglist | argument<any '=' any>) func=any - | func=arglist<(not argument<any '=' any>) any ','> ) - rpar=')' > - after=any* - > - """ - - def transform(self, node, results): - func = results['func'] - - touch_import(None, u'collections', node=node) - - args = [func.clone(), String(u', ')] - args.extend(Attr(Name(u'collections'), Name(u'Callable'))) - return Call(Name(u'isinstance'), args, prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_dict.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_dict.py deleted file mode 100644 index f681e4d7..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_dict.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for dict methods. - -d.keys() -> list(d.keys()) -d.items() -> list(d.items()) -d.values() -> list(d.values()) - -d.iterkeys() -> iter(d.keys()) -d.iteritems() -> iter(d.items()) -d.itervalues() -> iter(d.values()) - -d.viewkeys() -> d.keys() -d.viewitems() -> d.items() -d.viewvalues() -> d.values() - -Except in certain very specific contexts: the iter() can be dropped -when the context is list(), sorted(), iter() or for...in; the list() -can be dropped when the context is list() or sorted() (but not iter() -or for...in!). Special contexts that apply to both: list(), sorted(), tuple(), -set(), any(), all(), sum(). - -Note: iter(d.keys()) could be written as iter(d) but since the -original d.iterkeys() was also redundant we don't fix this. And there -are (rare) contexts where it makes a difference (e.g. when passing it -as an argument to a function that introspects the argument). -""" - -# Local imports -from .. import pytree -from .. import patcomp -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot -from .. import fixer_util - - -iter_exempt = fixer_util.consuming_calls | set(["iter"]) - - -class FixDict(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - power< head=any+ - trailer< '.'
method=('keys'|'items'|'values'| - 'iterkeys'|'iteritems'|'itervalues'| - 'viewkeys'|'viewitems'|'viewvalues') > - parens=trailer< '(' ')' > - tail=any* - > - """ - - def transform(self, node, results): - head = results["head"] - method = results["method"][0] # Extract node for method name - tail = results["tail"] - syms = self.syms - method_name = method.value - isiter = method_name.startswith(u"iter") - isview = method_name.startswith(u"view") - if isiter or isview: - method_name = method_name[4:] - assert method_name in (u"keys", u"items", u"values"), repr(method) - head = [n.clone() for n in head] - tail = [n.clone() for n in tail] - special = not tail and self.in_special_context(node, isiter) - args = head + [pytree.Node(syms.trailer, - [Dot(), - Name(method_name, - prefix=method.prefix)]), - results["parens"].clone()] - new = pytree.Node(syms.power, args) - if not (special or isview): - new.prefix = u"" - new = Call(Name(u"iter" if isiter else u"list"), [new]) - if tail: - new = pytree.Node(syms.power, [new] + tail) - new.prefix = node.prefix - return new - - P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" - p1 = patcomp.compile_pattern(P1) - - P2 = """for_stmt< 'for' any 'in' node=any ':' any* > - | comp_for< 'for' any 'in' node=any any* > - """ - p2 = patcomp.compile_pattern(P2) - - def in_special_context(self, node, isiter): - if node.parent is None: - return False - results = {} - if (node.parent.parent is not None and - self.p1.match(node.parent.parent, results) and - results["node"] is node): - if isiter: - # iter(d.iterkeys()) -> iter(d.keys()), etc. - return results["func"].value in iter_exempt - else: - # list(d.keys()) -> list(d.keys()), etc. - return results["func"].value in fixer_util.consuming_calls - if not isiter: - return False - # for ... in d.iterkeys() -> for ... in d.keys(), etc. - return self.p2.match(node.parent, results) and results["node"] is node diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_except.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_except.py deleted file mode 100644 index e324718f..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_except.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Fixer for except statements with named exceptions. - -The following cases will be converted: - -- "except E, T:" where T is a name: - - except E as T: - -- "except E, T:" where T is not a name, tuple or list: - - except E as t: - T = t - - This is done because the target of an "except" clause must be a - name. - -- "except E, T:" where T is a tuple or list literal: - - except E as t: - T = t.args -""" -# Author: Collin Winter - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. 
import fixer_base -from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms - -def find_excepts(nodes): - for i, n in enumerate(nodes): - if n.type == syms.except_clause: - if n.children[0].value == u'except': - yield (n, nodes[i+2]) - -class FixExcept(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - try_stmt< 'try' ':' (simple_stmt | suite) - cleanup=(except_clause ':' (simple_stmt | suite))+ - tail=(['except' ':' (simple_stmt | suite)] - ['else' ':' (simple_stmt | suite)] - ['finally' ':' (simple_stmt | suite)]) > - """ - - def transform(self, node, results): - syms = self.syms - - tail = [n.clone() for n in results["tail"]] - - try_cleanup = [ch.clone() for ch in results["cleanup"]] - for except_clause, e_suite in find_excepts(try_cleanup): - if len(except_clause.children) == 4: - (E, comma, N) = except_clause.children[1:4] - comma.replace(Name(u"as", prefix=u" ")) - - if N.type != token.NAME: - # Generate a new N for the except clause - new_N = Name(self.new_name(), prefix=u" ") - target = N.clone() - target.prefix = u"" - N.replace(new_N) - new_N = new_N.clone() - - # Insert "old_N = new_N" as the first statement in - # the except body. This loop skips leading whitespace - # and indents - #TODO(cwinter) suite-cleanup - suite_stmts = e_suite.children - for i, stmt in enumerate(suite_stmts): - if isinstance(stmt, pytree.Node): - break - - # The assignment is different if old_N is a tuple or list - # In that case, the assignment is old_N = new_N.args - if is_tuple(N) or is_list(N): - assign = Assign(target, Attr(new_N, Name(u'args'))) - else: - assign = Assign(target, new_N) - - #TODO(cwinter) stopgap until children becomes a smart list - for child in reversed(suite_stmts[:i]): - e_suite.insert_child(0, child) - e_suite.insert_child(i, assign) - elif N.prefix == u"": - # No space after a comma is legal; no space after "as", - # not so much. - N.prefix = u" " - - #TODO(cwinter) fix this when children becomes a smart list - children = [c.clone() for c in node.children[:3]] + try_cleanup + tail - return pytree.Node(node.type, children) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exec.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exec.py deleted file mode 100644 index 50e18544..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exec.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for exec. - -This converts usages of the exec statement into calls to a built-in -exec() function. - -exec code in ns1, ns2 -> exec(code, ns1, ns2) -""" - -# Local imports -from .. import pytree -from .. 
import fixer_base -from ..fixer_util import Comma, Name, Call - - -class FixExec(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - exec_stmt< 'exec' a=any 'in' b=any [',' c=any] > - | - exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any > - """ - - def transform(self, node, results): - assert results - syms = self.syms - a = results["a"] - b = results.get("b") - c = results.get("c") - args = [a.clone()] - args[0].prefix = "" - if b is not None: - args.extend([Comma(), b.clone()]) - if c is not None: - args.extend([Comma(), c.clone()]) - - return Call(Name(u"exec"), args, prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_execfile.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_execfile.py deleted file mode 100644 index 2f29d3b2..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_execfile.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for execfile. - -This converts usages of the execfile function into calls to the built-in -exec() function. -""" - -from .. import fixer_base -from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node, - ArgList, String, syms) - - -class FixExecfile(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > > - | - power< 'execfile' trailer< '(' filename=any ')' > > - """ - - def transform(self, node, results): - assert results - filename = results["filename"] - globals = results.get("globals") - locals = results.get("locals") - - # Copy over the prefix from the right parentheses end of the execfile - # call. - execfile_paren = node.children[-1].children[-1].clone() - # Construct open().read(). - open_args = ArgList([filename.clone()], rparen=execfile_paren) - open_call = Node(syms.power, [Name(u"open"), open_args]) - read = [Node(syms.trailer, [Dot(), Name(u'read')]), - Node(syms.trailer, [LParen(), RParen()])] - open_expr = [open_call] + read - # Wrap the open call in a compile call. This is so the filename will be - # preserved in the execed code. - filename_arg = filename.clone() - filename_arg.prefix = u" " - exec_str = String(u"'exec'", u" ") - compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str] - compile_call = Call(Name(u"compile"), compile_args, u"") - # Finally, replace the execfile call with an exec call. - args = [compile_call] - if globals is not None: - args.extend([Comma(), globals.clone()]) - if locals is not None: - args.extend([Comma(), locals.clone()]) - return Call(Name(u"exec"), args, prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exitfunc.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exitfunc.py deleted file mode 100644 index 89fb3db5..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_exitfunc.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -Convert use of sys.exitfunc to use the atexit module. 
-""" - -# Author: Benjamin Peterson - -from lib2to3 import pytree, fixer_base -from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms - - -class FixExitfunc(fixer_base.BaseFix): - keep_line_order = True - BM_compatible = True - - PATTERN = """ - ( - sys_import=import_name<'import' - ('sys' - | - dotted_as_names< (any ',')* 'sys' (',' any)* > - ) - > - | - expr_stmt< - power< 'sys' trailer< '.' 'exitfunc' > > - '=' func=any > - ) - """ - - def __init__(self, *args): - super(FixExitfunc, self).__init__(*args) - - def start_tree(self, tree, filename): - super(FixExitfunc, self).start_tree(tree, filename) - self.sys_import = None - - def transform(self, node, results): - # First, find a the sys import. We'll just hope it's global scope. - if "sys_import" in results: - if self.sys_import is None: - self.sys_import = results["sys_import"] - return - - func = results["func"].clone() - func.prefix = u"" - register = pytree.Node(syms.power, - Attr(Name(u"atexit"), Name(u"register")) - ) - call = Call(register, [func], node.prefix) - node.replace(call) - - if self.sys_import is None: - # That's interesting. - self.warning(node, "Can't find sys import; Please add an atexit " - "import at the top of your file.") - return - - # Now add an atexit import after the sys import. - names = self.sys_import.children[1] - if names.type == syms.dotted_as_names: - names.append_child(Comma()) - names.append_child(Name(u"atexit", u" ")) - else: - containing_stmt = self.sys_import.parent - position = containing_stmt.children.index(self.sys_import) - stmt_container = containing_stmt.parent - new_import = pytree.Node(syms.import_name, - [Name(u"import"), Name(u"atexit", u" ")] - ) - new = pytree.Node(syms.simple_stmt, [new_import]) - containing_stmt.insert_child(position + 1, Newline()) - containing_stmt.insert_child(position + 2, new) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_filter.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_filter.py deleted file mode 100644 index 18ee2ffc..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_filter.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that changes filter(F, X) into list(filter(F, X)). - -We avoid the transformation if the filter() call is directly contained -in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or -for V in <>:. - -NOTE: This is still not correct if the original code was depending on -filter(F, X) to return a string if X is a string and a tuple if X is a -tuple. That would require type inference, which we don't do. Let -Python 2.6 figure it out. -""" - -# Local imports -from ..pgen2 import token -from .. 
import fixer_base -from ..fixer_util import Name, Call, ListComp, in_special_context - -class FixFilter(fixer_base.ConditionalFix): - BM_compatible = True - - PATTERN = """ - filter_lambda=power< - 'filter' - trailer< - '(' - arglist< - lambdef< 'lambda' - (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any - > - ',' - it=any - > - ')' - > - > - | - power< - 'filter' - trailer< '(' arglist< none='None' ',' seq=any > ')' > - > - | - power< - 'filter' - args=trailer< '(' [any] ')' > - > - """ - - skip_on = "future_builtins.filter" - - def transform(self, node, results): - if self.should_skip(node): - return - - if "filter_lambda" in results: - new = ListComp(results.get("fp").clone(), - results.get("fp").clone(), - results.get("it").clone(), - results.get("xp").clone()) - - elif "none" in results: - new = ListComp(Name(u"_f"), - Name(u"_f"), - results["seq"].clone(), - Name(u"_f")) - - else: - if in_special_context(node): - return None - new = node.clone() - new.prefix = u"" - new = Call(Name(u"list"), [new]) - new.prefix = node.prefix - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_funcattrs.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_funcattrs.py deleted file mode 100644 index 9e45c028..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_funcattrs.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Fix function attribute names (f.func_x -> f.__x__).""" -# Author: Collin Winter - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - - -class FixFuncattrs(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals' - | 'func_name' | 'func_defaults' | 'func_code' - | 'func_dict') > any* > - """ - - def transform(self, node, results): - attr = results["attr"][0] - attr.replace(Name((u"__%s__" % attr.value[5:]), - prefix=attr.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_future.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_future.py deleted file mode 100644 index fbcb86af..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_future.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Remove __future__ imports - -from __future__ import foo is replaced with an empty line. -""" -# Author: Christian Heimes - -# Local imports -from .. import fixer_base -from ..fixer_util import BlankLine - -class FixFuture(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" - - # This should be run last -- some things check for the import - run_order = 10 - - def transform(self, node, results): - new = BlankLine() - new.prefix = node.prefix - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_getcwdu.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_getcwdu.py deleted file mode 100644 index 82233c89..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_getcwdu.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Fixer that changes os.getcwdu() to os.getcwd(). -""" -# Author: Victor Stinner - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - -class FixGetcwdu(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - power< 'os' trailer< dot='.' 
name='getcwdu' > any* > - """ - - def transform(self, node, results): - name = results["name"] - name.replace(Name(u"getcwd", prefix=name.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_has_key.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_has_key.py deleted file mode 100644 index bead4cb5..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_has_key.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for has_key(). - -Calls to .has_key() methods are expressed in terms of the 'in' -operator: - - d.has_key(k) -> k in d - -CAVEATS: -1) While the primary target of this fixer is dict.has_key(), the - fixer will change any has_key() method call, regardless of its - class. - -2) Cases like this will not be converted: - - m = d.has_key - if m(k): - ... - - Only *calls* to has_key() are converted. While it is possible to - convert the above to something like - - m = d.__contains__ - if m(k): - ... - - this is currently not done. -""" - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Name, parenthesize - - -class FixHasKey(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - anchor=power< - before=any+ - trailer< '.' 'has_key' > - trailer< - '(' - ( not(arglist | argument<any '=' any>) arg=any - | arglist<(not argument<any '=' any>) arg=any ','> - ) - ')' - > - after=any* - > - | - negation=not_test< - 'not' - anchor=power< - before=any+ - trailer< '.' 'has_key' > - trailer< - '(' - ( not(arglist | argument<any '=' any>) arg=any - | arglist<(not argument<any '=' any>) arg=any ','> - ) - ')' - > - > - > - """ - - def transform(self, node, results): - assert results - syms = self.syms - if (node.parent.type == syms.not_test and - self.pattern.match(node.parent)): - # Don't transform a node matching the first alternative of the - # pattern when its parent matches the second alternative - return None - negation = results.get("negation") - anchor = results["anchor"] - prefix = node.prefix - before = [n.clone() for n in results["before"]] - arg = results["arg"].clone() - after = results.get("after") - if after: - after = [n.clone() for n in after] - if arg.type in (syms.comparison, syms.not_test, syms.and_test, - syms.or_test, syms.test, syms.lambdef, syms.argument): - arg = parenthesize(arg) - if len(before) == 1: - before = before[0] - else: - before = pytree.Node(syms.power, before) - before.prefix = u" " - n_op = Name(u"in", prefix=u" ") - if negation: - n_not = Name(u"not", prefix=u" ") - n_op = pytree.Node(syms.comp_op, (n_not, n_op)) - new = pytree.Node(syms.comparison, (arg, n_op, before)) - if after: - new = parenthesize(new) - new = pytree.Node(syms.power, (new,) + tuple(after)) - if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr, - syms.and_expr, syms.shift_expr, - syms.arith_expr, syms.term, - syms.factor, syms.power): - new = parenthesize(new) - new.prefix = prefix - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_idioms.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_idioms.py deleted file mode 100644 index 37b6eefa..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_idioms.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Adjust some old Python 2 idioms to their modern counterparts.
- -* Change some type comparisons to isinstance() calls: - type(x) == T -> isinstance(x, T) - type(x) is T -> isinstance(x, T) - type(x) != T -> not isinstance(x, T) - type(x) is not T -> not isinstance(x, T) - -* Change "while 1:" into "while True:". - -* Change both - - v = list(EXPR) - v.sort() - foo(v) - -and the more general - - v = EXPR - v.sort() - foo(v) - -into - - v = sorted(EXPR) - foo(v) -""" -# Author: Jacques Frechet, Collin Winter - -# Local imports -from .. import fixer_base -from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms - -CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)" -TYPE = "power< 'type' trailer< '(' x=any ')' > >" - -class FixIdioms(fixer_base.BaseFix): - explicit = True # The user must ask for this fixer - - PATTERN = r""" - isinstance=comparison< %s %s T=any > - | - isinstance=comparison< T=any %s %s > - | - while_stmt< 'while' while='1' ':' any+ > - | - sorted=any< - any* - simple_stmt< - expr_stmt< id1=any '=' - power< list='list' trailer< '(' (not arglist) any ')' > > - > - '\n' - > - sort= - simple_stmt< - power< id2=any - trailer< '.' 'sort' > trailer< '(' ')' > - > - '\n' - > - next=any* - > - | - sorted=any< - any* - simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' > - sort= - simple_stmt< - power< id2=any - trailer< '.' 'sort' > trailer< '(' ')' > - > - '\n' - > - next=any* - > - """ % (TYPE, CMP, CMP, TYPE) - - def match(self, node): - r = super(FixIdioms, self).match(node) - # If we've matched one of the sort/sorted subpatterns above, we - # want to reject matches where the initial assignment and the - # subsequent .sort() call involve different identifiers. - if r and "sorted" in r: - if r["id1"] == r["id2"]: - return r - return None - return r - - def transform(self, node, results): - if "isinstance" in results: - return self.transform_isinstance(node, results) - elif "while" in results: - return self.transform_while(node, results) - elif "sorted" in results: - return self.transform_sort(node, results) - else: - raise RuntimeError("Invalid match") - - def transform_isinstance(self, node, results): - x = results["x"].clone() # The thing inside of type() - T = results["T"].clone() # The type being compared against - x.prefix = u"" - T.prefix = u" " - test = Call(Name(u"isinstance"), [x, Comma(), T]) - if "n" in results: - test.prefix = u" " - test = Node(syms.not_test, [Name(u"not"), test]) - test.prefix = node.prefix - return test - - def transform_while(self, node, results): - one = results["while"] - one.replace(Name(u"True", prefix=one.prefix)) - - def transform_sort(self, node, results): - sort_stmt = results["sort"] - next_stmt = results["next"] - list_call = results.get("list") - simple_expr = results.get("expr") - - if list_call: - list_call.replace(Name(u"sorted", prefix=list_call.prefix)) - elif simple_expr: - new = simple_expr.clone() - new.prefix = u"" - simple_expr.replace(Call(Name(u"sorted"), [new], - prefix=simple_expr.prefix)) - else: - raise RuntimeError("should not have reached here") - sort_stmt.remove() - - btwn = sort_stmt.prefix - # Keep any prefix lines between the sort_stmt and the list_call and - # shove them right after the sorted() call. - if u"\n" in btwn: - if next_stmt: - # The new prefix should be everything from the sort_stmt's - # prefix up to the last newline, then the old prefix after a new - # line. 
- prefix_lines = (btwn.rpartition(u"\n")[0], next_stmt[0].prefix) - next_stmt[0].prefix = u"\n".join(prefix_lines) - else: - assert list_call.parent - assert list_call.next_sibling is None - # Put a blank line after list_call and set its prefix. - end_line = BlankLine() - list_call.parent.append_child(end_line) - assert list_call.next_sibling is end_line - # The new prefix should be everything up to the first new line - # of sort_stmt's prefix. - end_line.prefix = btwn.rpartition(u"\n")[0] diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_import.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_import.py deleted file mode 100644 index 201e811e..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_import.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Fixer for import statements. -If spam is being imported from the local directory, this import: - from spam import eggs -Becomes: - from .spam import eggs - -And this import: - import spam -Becomes: - from . import spam -""" - -# Local imports -from .. import fixer_base -from os.path import dirname, join, exists, sep -from ..fixer_util import FromImport, syms, token - - -def traverse_imports(names): - """ - Walks over all the names imported in a dotted_as_names node. - """ - pending = [names] - while pending: - node = pending.pop() - if node.type == token.NAME: - yield node.value - elif node.type == syms.dotted_name: - yield "".join([ch.value for ch in node.children]) - elif node.type == syms.dotted_as_name: - pending.append(node.children[0]) - elif node.type == syms.dotted_as_names: - pending.extend(node.children[::-2]) - else: - raise AssertionError("unknown node type") - - -class FixImport(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - import_from< 'from' imp=any 'import' ['('] any [')'] > - | - import_name< 'import' imp=any > - """ - - def start_tree(self, tree, name): - super(FixImport, self).start_tree(tree, name) - self.skip = "absolute_import" in tree.future_features - - def transform(self, node, results): - if self.skip: - return - imp = results['imp'] - - if node.type == syms.import_from: - # Some imps are top-level (eg: 'import ham') - # some are first level (eg: 'import ham.eggs') - # some are third level (eg: 'import ham.eggs as spam') - # Hence, the loop - while not hasattr(imp, 'value'): - imp = imp.children[0] - if self.probably_a_local_import(imp.value): - imp.value = u"." + imp.value - imp.changed() - else: - have_local = False - have_absolute = False - for mod_name in traverse_imports(imp): - if self.probably_a_local_import(mod_name): - have_local = True - else: - have_absolute = True - if have_absolute: - if have_local: - # We won't handle both sibling and absolute imports in the - # same statement at the moment. - self.warning(node, "absolute and local imports together") - return - - new = FromImport(u".", [imp]) - new.prefix = node.prefix - return new - - def probably_a_local_import(self, imp_name): - if imp_name.startswith(u"."): - # Relative imports are certainly not local imports. - return False - imp_name = imp_name.split(u".", 1)[0] - base_path = dirname(self.filename) - base_path = join(base_path, imp_name) - # If there is no __init__.py next to the file it's not in a package - # so can't be a relative import.
- if not exists(join(dirname(base_path), "__init__.py")): - return False - for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]: - if exists(base_path + ext): - return True - return False diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports.py deleted file mode 100644 index 93c9e678..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports.py +++ /dev/null @@ -1,145 +0,0 @@ -"""Fix incompatible imports and module references.""" -# Authors: Collin Winter, Nick Edds - -# Local imports -from .. import fixer_base -from ..fixer_util import Name, attr_chain - -MAPPING = {'StringIO': 'io', - 'cStringIO': 'io', - 'cPickle': 'pickle', - '__builtin__' : 'builtins', - 'copy_reg': 'copyreg', - 'Queue': 'queue', - 'SocketServer': 'socketserver', - 'ConfigParser': 'configparser', - 'repr': 'reprlib', - 'FileDialog': 'tkinter.filedialog', - 'tkFileDialog': 'tkinter.filedialog', - 'SimpleDialog': 'tkinter.simpledialog', - 'tkSimpleDialog': 'tkinter.simpledialog', - 'tkColorChooser': 'tkinter.colorchooser', - 'tkCommonDialog': 'tkinter.commondialog', - 'Dialog': 'tkinter.dialog', - 'Tkdnd': 'tkinter.dnd', - 'tkFont': 'tkinter.font', - 'tkMessageBox': 'tkinter.messagebox', - 'ScrolledText': 'tkinter.scrolledtext', - 'Tkconstants': 'tkinter.constants', - 'Tix': 'tkinter.tix', - 'ttk': 'tkinter.ttk', - 'Tkinter': 'tkinter', - 'markupbase': '_markupbase', - '_winreg': 'winreg', - 'thread': '_thread', - 'dummy_thread': '_dummy_thread', - # anydbm and whichdb are handled by fix_imports2 - 'dbhash': 'dbm.bsd', - 'dumbdbm': 'dbm.dumb', - 'dbm': 'dbm.ndbm', - 'gdbm': 'dbm.gnu', - 'xmlrpclib': 'xmlrpc.client', - 'DocXMLRPCServer': 'xmlrpc.server', - 'SimpleXMLRPCServer': 'xmlrpc.server', - 'httplib': 'http.client', - 'htmlentitydefs' : 'html.entities', - 'HTMLParser' : 'html.parser', - 'Cookie': 'http.cookies', - 'cookielib': 'http.cookiejar', - 'BaseHTTPServer': 'http.server', - 'SimpleHTTPServer': 'http.server', - 'CGIHTTPServer': 'http.server', - #'test.test_support': 'test.support', - 'commands': 'subprocess', - 'UserString' : 'collections', - 'UserList' : 'collections', - 'urlparse' : 'urllib.parse', - 'robotparser' : 'urllib.robotparser', -} - - -def alternates(members): - return "(" + "|".join(map(repr, members)) + ")" - - -def build_pattern(mapping=MAPPING): - mod_list = ' | '.join(["module_name='%s'" % key for key in mapping]) - bare_names = alternates(mapping.keys()) - - yield """name_import=import_name< 'import' ((%s) | - multiple_imports=dotted_as_names< any* (%s) any* >) > - """ % (mod_list, mod_list) - yield """import_from< 'from' (%s) 'import' ['('] - ( any | import_as_name< any 'as' any > | - import_as_names< any* >) [')'] > - """ % mod_list - yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > | - multiple_imports=dotted_as_names< - any* dotted_as_name< (%s) 'as' any > any* >) > - """ % (mod_list, mod_list) - - # Find usages of module members in code e.g. thread.foo(bar) - yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names - - -class FixImports(fixer_base.BaseFix): - - BM_compatible = True - keep_line_order = True - # This is overridden in fix_imports2. - mapping = MAPPING - - # We want to run this fixer late, so fix_import doesn't try to make stdlib - # renames into relative imports. 
- run_order = 6 - - def build_pattern(self): - return "|".join(build_pattern(self.mapping)) - - def compile_pattern(self): - # We override this, so MAPPING can be programmatically altered and the - # changes will be reflected in PATTERN. - self.PATTERN = self.build_pattern() - super(FixImports, self).compile_pattern() - - # Don't match the node if it's within another match. - def match(self, node): - match = super(FixImports, self).match - results = match(node) - if results: - # Module usage could be in the trailer of an attribute lookup, so we - # might have nested matches when "bare_with_attr" is present. - if "bare_with_attr" not in results and \ - any(match(obj) for obj in attr_chain(node, "parent")): - return False - return results - return False - - def start_tree(self, tree, filename): - super(FixImports, self).start_tree(tree, filename) - self.replace = {} - - def transform(self, node, results): - import_mod = results.get("module_name") - if import_mod: - mod_name = import_mod.value - new_name = unicode(self.mapping[mod_name]) - import_mod.replace(Name(new_name, prefix=import_mod.prefix)) - if "name_import" in results: - # If it's not a "from x import x, y" or "import x as y" import, - # mark its usage to be replaced. - self.replace[mod_name] = new_name - if "multiple_imports" in results: - # This is a nasty hack to fix multiple imports on a line (e.g., - # "import StringIO, urlparse"). The problem is that I can't - # figure out an easy way to make a pattern recognize the keys of - # MAPPING randomly sprinkled in an import statement. - results = self.match(node) - if results: - self.transform(node, results) - else: - # Replace usage of the module. - bare_name = results["bare_with_attr"][0] - new_name = self.replace.get(bare_name.value) - if new_name: - bare_name.replace(Name(new_name, prefix=bare_name.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports2.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports2.py deleted file mode 100644 index 9a33c67b..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_imports2.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Fix incompatible imports and module references that must be fixed after -fix_imports.""" -from . import fix_imports - - -MAPPING = { - 'whichdb': 'dbm', - 'anydbm': 'dbm', - } - - -class FixImports2(fix_imports.FixImports): - - run_order = 7 - - mapping = MAPPING diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_input.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_input.py deleted file mode 100644 index fbf4c72f..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_input.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Fixer that changes input(...) into eval(input(...)).""" -# Author: Andre Roberge - -# Local imports -from .. import fixer_base -from ..fixer_util import Call, Name -from .. import patcomp - - -context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >") - - -class FixInput(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< 'input' args=trailer< '(' [any] ')' > > - """ - - def transform(self, node, results): - # If we're already wrapped in an eval() call, we're done.
- if context.match(node.parent.parent): - return - - new = node.clone() - new.prefix = u"" - return Call(Name(u"eval"), [new], prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py deleted file mode 100644 index e7bb5052..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_intern.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2006 Georg Brandl. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for intern(). - -intern(s) -> sys.intern(s)""" - -# Local imports -from .. import pytree -from .. import fixer_base -from ..fixer_util import Name, Attr, touch_import - - -class FixIntern(fixer_base.BaseFix): - BM_compatible = True - order = "pre" - - PATTERN = """ - power< 'intern' - trailer< lpar='(' - ( not(arglist | argument<any '=' any>) obj=any - | obj=arglist<(not argument<any '=' any>) any ','> ) - rpar=')' > - after=any* - > - """ - - def transform(self, node, results): - syms = self.syms - obj = results["obj"].clone() - if obj.type == syms.arglist: - newarglist = obj.clone() - else: - newarglist = pytree.Node(syms.arglist, [obj.clone()]) - after = results["after"] - if after: - after = [n.clone() for n in after] - new = pytree.Node(syms.power, - Attr(Name(u"sys"), Name(u"intern")) + - [pytree.Node(syms.trailer, - [results["lpar"].clone(), - newarglist, - results["rpar"].clone()])] + after) - new.prefix = node.prefix - touch_import(None, u'sys', node) - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_isinstance.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_isinstance.py deleted file mode 100644 index 4b04c8fd..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_isinstance.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2008 Armin Ronacher. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that cleans up a tuple argument to isinstance after the tokens -in it were fixed. This is mainly used to remove double occurrences of -tokens as a leftover of the long -> int / unicode -> str conversion. - -eg. isinstance(x, (int, long)) -> isinstance(x, (int, int)) - -> isinstance(x, int) -""" - -from ..
import fixer_base -from ..fixer_util import token - - -class FixIsinstance(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< - 'isinstance' - trailer< '(' arglist< any ',' atom< '(' - args=testlist_gexp< any+ > - ')' > > ')' > - > - """ - - run_order = 6 - - def transform(self, node, results): - names_inserted = set() - testlist = results["args"] - args = testlist.children - new_args = [] - iterator = enumerate(args) - for idx, arg in iterator: - if arg.type == token.NAME and arg.value in names_inserted: - if idx < len(args) - 1 and args[idx + 1].type == token.COMMA: - iterator.next() - continue - else: - new_args.append(arg) - if arg.type == token.NAME: - names_inserted.add(arg.value) - if new_args and new_args[-1].type == token.COMMA: - del new_args[-1] - if len(new_args) == 1: - atom = testlist.parent - new_args[0].prefix = atom.prefix - atom.replace(new_args[0]) - else: - args[:] = new_args - node.changed() diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools.py deleted file mode 100644 index 067641b8..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools.py +++ /dev/null @@ -1,43 +0,0 @@ -""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and - itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363) - - imports from itertools are fixed in fix_itertools_import.py - - If itertools is imported as something else (ie: import itertools as it; - it.izip(spam, eggs)) method calls will not get fixed. - """ - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - -class FixItertools(fixer_base.BaseFix): - BM_compatible = True - it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" - PATTERN = """ - power< it='itertools' - trailer< - dot='.' 
func=%(it_funcs)s > trailer< '(' [any] ')' > > - | - power< func=%(it_funcs)s trailer< '(' [any] ')' > > - """ %(locals()) - - # Needs to be run after fix_(map|zip|filter) - run_order = 6 - - def transform(self, node, results): - prefix = None - func = results['func'][0] - if ('it' in results and - func.value not in (u'ifilterfalse', u'izip_longest')): - dot, it = (results['dot'], results['it']) - # Remove the 'itertools' - prefix = it.prefix - it.remove() - # Replace the node which contains ('.', 'function') with the - # function (to be consistent with the second part of the pattern) - dot.remove() - func.parent.replace(func) - - prefix = prefix or func.prefix - func.replace(Name(func.value[1:], prefix=prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools_imports.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools_imports.py deleted file mode 100644 index 28610cfc..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools_imports.py +++ /dev/null @@ -1,57 +0,0 @@ -""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """ - -# Local imports -from lib2to3 import fixer_base -from lib2to3.fixer_util import BlankLine, syms, token - - -class FixItertoolsImports(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - import_from< 'from' 'itertools' 'import' imports=any > - """ %(locals()) - - def transform(self, node, results): - imports = results['imports'] - if imports.type == syms.import_as_name or not imports.children: - children = [imports] - else: - children = imports.children - for child in children[::2]: - if child.type == token.NAME: - member = child.value - name_node = child - elif child.type == token.STAR: - # Just leave the import as is. - return - else: - assert child.type == syms.import_as_name - name_node = child.children[0] - member_name = name_node.value - if member_name in (u'imap', u'izip', u'ifilter'): - child.value = None - child.remove() - elif member_name in (u'ifilterfalse', u'izip_longest'): - node.changed() - name_node.value = (u'filterfalse' if member_name[1] == u'f' - else u'zip_longest') - - # Make sure the import statement is still sane - children = imports.children[:] or [imports] - remove_comma = True - for child in children: - if remove_comma and child.type == token.COMMA: - child.remove() - else: - remove_comma ^= True - - while children and children[-1].type == token.COMMA: - children.pop().remove() - - # If there are no imports left, just get rid of the entire statement - if (not (imports.children or getattr(imports, 'value', None)) or - imports.parent is None): - p = node.prefix - node = BlankLine() - node.prefix = p - return node diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_long.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_long.py deleted file mode 100644 index 5dddde0d..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_long.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that turns 'long' into 'int' everywhere. 
-""" - -# Local imports -from lib2to3 import fixer_base -from lib2to3.fixer_util import is_probably_builtin - - -class FixLong(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "'long'" - - def transform(self, node, results): - if is_probably_builtin(node): - node.value = u"int" - node.changed() diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_map.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_map.py deleted file mode 100644 index 7a7d0dbc..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_map.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there -exists a 'from future_builtins import map' statement in the top-level -namespace. - -As a special case, map(None, X) is changed into list(X). (This is -necessary because the semantics are changed in this case -- the new -map(None, X) is equivalent to [(x,) for x in X].) - -We avoid the transformation (except for the special case mentioned -above) if the map() call is directly contained in iter(<>), list(<>), -tuple(<>), sorted(<>), ...join(<>), or for V in <>:. - -NOTE: This is still not correct if the original code was depending on -map(F, X, Y, ...) to go on until the longest argument is exhausted, -substituting None for missing values -- like zip(), it now stops as -soon as the shortest argument is exhausted. -""" - -# Local imports -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Name, Call, ListComp, in_special_context -from ..pygram import python_symbols as syms - -class FixMap(fixer_base.ConditionalFix): - BM_compatible = True - - PATTERN = """ - map_none=power< - 'map' - trailer< '(' arglist< 'None' ',' arg=any [','] > ')' > - > - | - map_lambda=power< - 'map' - trailer< - '(' - arglist< - lambdef< 'lambda' - (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any - > - ',' - it=any - > - ')' - > - > - | - power< - 'map' trailer< '(' [arglist=any] ')' > - > - """ - - skip_on = 'future_builtins.map' - - def transform(self, node, results): - if self.should_skip(node): - return - - if node.parent.type == syms.simple_stmt: - self.warning(node, "You should use a for loop here") - new = node.clone() - new.prefix = u"" - new = Call(Name(u"list"), [new]) - elif "map_lambda" in results: - new = ListComp(results["xp"].clone(), - results["fp"].clone(), - results["it"].clone()) - else: - if "map_none" in results: - new = results["arg"].clone() - else: - if "arglist" in results: - args = results["arglist"] - if args.type == syms.arglist and \ - args.children[0].type == token.NAME and \ - args.children[0].value == "None": - self.warning(node, "cannot convert map(None, ...) " - "with multiple arguments because map() " - "now truncates to the shortest sequence") - return - if in_special_context(node): - return None - new = node.clone() - new.prefix = u"" - new = Call(Name(u"list"), [new]) - new.prefix = node.prefix - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_metaclass.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_metaclass.py deleted file mode 100644 index 23da8ccd..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_metaclass.py +++ /dev/null @@ -1,228 +0,0 @@ -"""Fixer for __metaclass__ = X -> (metaclass=X) methods. 
- - The various forms of classdef (inherits nothing, inherits once, inherits - many) don't parse the same in the CST so we look at ALL classes for - a __metaclass__ and if we find one normalize the inherits to all be - an arglist. - - For one-liner classes ('class X: pass') there is no indent/dedent so - we normalize those into having a suite. - - Moving the __metaclass__ into the classdef can also cause the class - body to be empty so there is some special casing for that as well. - - This fixer also tries very hard to keep original indenting and spacing - in all those corner cases. - -""" -# Author: Jack Diederich - -# Local imports -from .. import fixer_base -from ..pygram import token -from ..fixer_util import Name, syms, Node, Leaf - - -def has_metaclass(parent): - """ we have to check the cls_node without changing it. - There are two possibilities: - 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta') - 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta') - """ - for node in parent.children: - if node.type == syms.suite: - return has_metaclass(node) - elif node.type == syms.simple_stmt and node.children: - expr_node = node.children[0] - if expr_node.type == syms.expr_stmt and expr_node.children: - left_side = expr_node.children[0] - if isinstance(left_side, Leaf) and \ - left_side.value == '__metaclass__': - return True - return False - - -def fixup_parse_tree(cls_node): - """ one-line classes don't get a suite in the parse tree so we add - one to normalize the tree - """ - for node in cls_node.children: - if node.type == syms.suite: - # already in the preferred format, do nothing - return - - # !%@#! oneliners have no suite node, we have to fake one up - for i, node in enumerate(cls_node.children): - if node.type == token.COLON: - break - else: - raise ValueError("No class suite and no ':'!") - - # move everything into a suite node - suite = Node(syms.suite, []) - while cls_node.children[i+1:]: - move_node = cls_node.children[i+1] - suite.append_child(move_node.clone()) - move_node.remove() - cls_node.append_child(suite) - node = suite - - -def fixup_simple_stmt(parent, i, stmt_node): - """ if there is a semi-colon all the parts count as part of the same - simple_stmt.
We just want the __metaclass__ part so we move - everything after the semi-colon into its own simple_stmt node - """ - for semi_ind, node in enumerate(stmt_node.children): - if node.type == token.SEMI: # *sigh* - break - else: - return - - node.remove() # kill the semicolon - new_expr = Node(syms.expr_stmt, []) - new_stmt = Node(syms.simple_stmt, [new_expr]) - while stmt_node.children[semi_ind:]: - move_node = stmt_node.children[semi_ind] - new_expr.append_child(move_node.clone()) - move_node.remove() - parent.insert_child(i, new_stmt) - new_leaf1 = new_stmt.children[0].children[0] - old_leaf1 = stmt_node.children[0].children[0] - new_leaf1.prefix = old_leaf1.prefix - - -def remove_trailing_newline(node): - if node.children and node.children[-1].type == token.NEWLINE: - node.children[-1].remove() - - -def find_metas(cls_node): - # find the suite node (Mmm, sweet nodes) - for node in cls_node.children: - if node.type == syms.suite: - break - else: - raise ValueError("No class suite!") - - # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ] - for i, simple_node in list(enumerate(node.children)): - if simple_node.type == syms.simple_stmt and simple_node.children: - expr_node = simple_node.children[0] - if expr_node.type == syms.expr_stmt and expr_node.children: - # Check if the expr_node is a simple assignment. - left_node = expr_node.children[0] - if isinstance(left_node, Leaf) and \ - left_node.value == u'__metaclass__': - # We found an assignment to __metaclass__. - fixup_simple_stmt(node, i, simple_node) - remove_trailing_newline(simple_node) - yield (node, i, simple_node) - - -def fixup_indent(suite): - """ If an INDENT is followed by a thing with a prefix then remove the prefix - Otherwise we get in trouble when removing __metaclass__ at suite start - """ - kids = suite.children[::-1] - # find the first indent - while kids: - node = kids.pop() - if node.type == token.INDENT: - break - - # find the first Leaf - while kids: - node = kids.pop() - if isinstance(node, Leaf) and node.type != token.DEDENT: - if node.prefix: - node.prefix = u'' - return - else: - kids.extend(node.children[::-1]) - - -class FixMetaclass(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - classdef - """ - - def transform(self, node, results): - if not has_metaclass(node): - return - - fixup_parse_tree(node) - - # find metaclasses, keep the last one - last_metaclass = None - for suite, i, stmt in find_metas(node): - last_metaclass = stmt - stmt.remove() - - text_type = node.children[0].type # always Leaf(nnn, 'class') - - # figure out what kind of classdef we have - if len(node.children) == 7: - # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite]) - # 0 1 2 3 4 5 6 - if node.children[3].type == syms.arglist: - arglist = node.children[3] - # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite]) - else: - parent = node.children[3].clone() - arglist = Node(syms.arglist, [parent]) - node.set_child(3, arglist) - elif len(node.children) == 6: - # Node(classdef, ['class', 'name', '(', ')', ':', suite]) - # 0 1 2 3 4 5 - arglist = Node(syms.arglist, []) - node.insert_child(3, arglist) - elif len(node.children) == 4: - # Node(classdef, ['class', 'name', ':', suite]) - # 0 1 2 3 - arglist = Node(syms.arglist, []) - node.insert_child(2, Leaf(token.RPAR, u')')) - node.insert_child(2, arglist) - node.insert_child(2, Leaf(token.LPAR, u'(')) - else: - raise ValueError("Unexpected class definition") - - # now stick the metaclass in the arglist - meta_txt =
last_metaclass.children[0].children[0] - meta_txt.value = 'metaclass' - orig_meta_prefix = meta_txt.prefix - - if arglist.children: - arglist.append_child(Leaf(token.COMMA, u',')) - meta_txt.prefix = u' ' - else: - meta_txt.prefix = u'' - - # compact the expression "metaclass = Meta" -> "metaclass=Meta" - expr_stmt = last_metaclass.children[0] - assert expr_stmt.type == syms.expr_stmt - expr_stmt.children[1].prefix = u'' - expr_stmt.children[2].prefix = u'' - - arglist.append_child(last_metaclass) - - fixup_indent(suite) - - # check for empty suite - if not suite.children: - # one-liner that was just __metaclass__ - suite.remove() - pass_leaf = Leaf(text_type, u'pass') - pass_leaf.prefix = orig_meta_prefix - node.append_child(pass_leaf) - node.append_child(Leaf(token.NEWLINE, u'\n')) - - elif len(suite.children) > 1 and \ - (suite.children[-2].type == token.INDENT and - suite.children[-1].type == token.DEDENT): - # there was only one line in the class body and it was __metaclass__ - pass_leaf = Leaf(text_type, u'pass') - suite.insert_child(-1, pass_leaf) - suite.insert_child(-1, Leaf(token.NEWLINE, u'\n')) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_methodattrs.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_methodattrs.py deleted file mode 100644 index f3c1ecfe..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_methodattrs.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Fix bound method attributes (method.im_? -> method.__?__). -""" -# Author: Christian Heimes - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - -MAP = { - "im_func" : "__func__", - "im_self" : "__self__", - "im_class" : "__self__.__class__" - } - -class FixMethodattrs(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > - """ - - def transform(self, node, results): - attr = results["attr"][0] - new = unicode(MAP[attr.value]) - attr.replace(Name(new, prefix=attr.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ne.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ne.py deleted file mode 100644 index 7025980b..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ne.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that turns <> into !=.""" - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. import fixer_base - - -class FixNe(fixer_base.BaseFix): - # This is so simple that we don't need the pattern compiler.
- - _accept_type = token.NOTEQUAL - - def match(self, node): - # Override - return node.value == u"<>" - - def transform(self, node, results): - new = pytree.Leaf(token.NOTEQUAL, u"!=", prefix=node.prefix) - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_next.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_next.py deleted file mode 100644 index f021a9bd..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_next.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Fixer for it.next() -> next(it), per PEP 3114.""" -# Author: Collin Winter - -# Things that currently aren't covered: -# - listcomp "next" names aren't warned -# - "with" statement targets aren't checked - -# Local imports -from ..pgen2 import token -from ..pygram import python_symbols as syms -from .. import fixer_base -from ..fixer_util import Name, Call, find_binding - -bind_warning = "Calls to builtin next() possibly shadowed by global binding" - - -class FixNext(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > > - | - power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > > - | - classdef< 'class' any+ ':' - suite< any* - funcdef< 'def' - name='next' - parameters< '(' NAME ')' > any+ > - any* > > - | - global=global_stmt< 'global' any* 'next' any* > - """ - - order = "pre" # Pre-order tree traversal - - def start_tree(self, tree, filename): - super(FixNext, self).start_tree(tree, filename) - - n = find_binding(u'next', tree) - if n: - self.warning(n, bind_warning) - self.shadowed_next = True - else: - self.shadowed_next = False - - def transform(self, node, results): - assert results - - base = results.get("base") - attr = results.get("attr") - name = results.get("name") - - if base: - if self.shadowed_next: - attr.replace(Name(u"__next__", prefix=attr.prefix)) - else: - base = [n.clone() for n in base] - base[0].prefix = u"" - node.replace(Call(Name(u"next", prefix=node.prefix), base)) - elif name: - n = Name(u"__next__", prefix=name.prefix) - name.replace(n) - elif attr: - # We don't do this transformation if we're assigning to "x.next". - # Unfortunately, it doesn't seem possible to do this in PATTERN, - # so it's being done here. - if is_assign_target(node): - head = results["head"] - if "".join([str(n) for n in head]).strip() == u'__builtin__': - self.warning(node, bind_warning) - return - attr.replace(Name(u"__next__")) - elif "global" in results: - self.warning(node, bind_warning) - self.shadowed_next = True - - -### The following functions help test if node is part of an assignment -### target. 
- -def is_assign_target(node): - assign = find_assign(node) - if assign is None: - return False - - for child in assign.children: - if child.type == token.EQUAL: - return False - elif is_subtree(child, node): - return True - return False - -def find_assign(node): - if node.type == syms.expr_stmt: - return node - if node.type == syms.simple_stmt or node.parent is None: - return None - return find_assign(node.parent) - -def is_subtree(root, node): - if root == node: - return True - return any(is_subtree(c, node) for c in root.children) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_nonzero.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_nonzero.py deleted file mode 100644 index ba83478f..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_nonzero.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Fixer for __nonzero__ -> __bool__ methods.""" -# Author: Collin Winter - -# Local imports -from .. import fixer_base -from ..fixer_util import Name, syms - -class FixNonzero(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - classdef< 'class' any+ ':' - suite< any* - funcdef< 'def' name='__nonzero__' - parameters< '(' NAME ')' > any+ > - any* > > - """ - - def transform(self, node, results): - name = results["name"] - new = Name(u"__bool__", prefix=name.prefix) - name.replace(new) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_numliterals.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_numliterals.py deleted file mode 100644 index b0c23f80..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_numliterals.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Fixer that turns 1L into 1, 0755 into 0o755. -""" -# Copyright 2007 Georg Brandl. -# Licensed to PSF under a Contributor Agreement. - -# Local imports -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Number - - -class FixNumliterals(fixer_base.BaseFix): - # This is so simple that we don't need the pattern compiler. - - _accept_type = token.NUMBER - - def match(self, node): - # Override - return (node.value.startswith(u"0") or node.value[-1] in u"Ll") - - def transform(self, node, results): - val = node.value - if val[-1] in u'Ll': - val = val[:-1] - elif val.startswith(u'0') and val.isdigit() and len(set(val)) > 1: - val = u"0o" + val[1:] - - return Number(val, prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_operator.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_operator.py deleted file mode 100644 index 7bf2c0dd..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_operator.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Fixer for operator functions. 
- -operator.isCallable(obj) -> hasattr(obj, '__call__') -operator.sequenceIncludes(obj) -> operator.contains(obj) -operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence) -operator.isMappingType(obj) -> isinstance(obj, collections.Mapping) -operator.isNumberType(obj) -> isinstance(obj, numbers.Number) -operator.repeat(obj, n) -> operator.mul(obj, n) -operator.irepeat(obj, n) -> operator.imul(obj, n) -""" - -# Local imports -from lib2to3 import fixer_base -from lib2to3.fixer_util import Call, Name, String, touch_import - - -def invocation(s): - def dec(f): - f.invocation = s - return f - return dec - - -class FixOperator(fixer_base.BaseFix): - BM_compatible = True - order = "pre" - - methods = """ - method=('isCallable'|'sequenceIncludes' - |'isSequenceType'|'isMappingType'|'isNumberType' - |'repeat'|'irepeat') - """ - obj = "'(' obj=any ')'" - PATTERN = """ - power< module='operator' - trailer< '.' %(methods)s > trailer< %(obj)s > > - | - power< %(methods)s trailer< %(obj)s > > - """ % dict(methods=methods, obj=obj) - - def transform(self, node, results): - method = self._check_method(node, results) - if method is not None: - return method(node, results) - - @invocation("operator.contains(%s)") - def _sequenceIncludes(self, node, results): - return self._handle_rename(node, results, u"contains") - - @invocation("hasattr(%s, '__call__')") - def _isCallable(self, node, results): - obj = results["obj"] - args = [obj.clone(), String(u", "), String(u"'__call__'")] - return Call(Name(u"hasattr"), args, prefix=node.prefix) - - @invocation("operator.mul(%s)") - def _repeat(self, node, results): - return self._handle_rename(node, results, u"mul") - - @invocation("operator.imul(%s)") - def _irepeat(self, node, results): - return self._handle_rename(node, results, u"imul") - - @invocation("isinstance(%s, collections.Sequence)") - def _isSequenceType(self, node, results): - return self._handle_type2abc(node, results, u"collections", u"Sequence") - - @invocation("isinstance(%s, collections.Mapping)") - def _isMappingType(self, node, results): - return self._handle_type2abc(node, results, u"collections", u"Mapping") - - @invocation("isinstance(%s, numbers.Number)") - def _isNumberType(self, node, results): - return self._handle_type2abc(node, results, u"numbers", u"Number") - - def _handle_rename(self, node, results, name): - method = results["method"][0] - method.value = name - method.changed() - - def _handle_type2abc(self, node, results, module, abc): - touch_import(None, module, node) - obj = results["obj"] - args = [obj.clone(), String(u", " + u".".join([module, abc]))] - return Call(Name(u"isinstance"), args, prefix=node.prefix) - - def _check_method(self, node, results): - method = getattr(self, "_" + results["method"][0].value.encode("ascii")) - if callable(method): - if "module" in results: - return method - else: - sub = (unicode(results["obj"]),) - invocation_str = unicode(method.invocation) % sub - self.warning(node, u"You should use '%s' here." 
% invocation_str) - return None diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_paren.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_paren.py deleted file mode 100644 index 8650cd90..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_paren.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Fixer that addes parentheses where they are required - -This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``.""" - -# By Taek Joo Kim and Benjamin Peterson - -# Local imports -from .. import fixer_base -from ..fixer_util import LParen, RParen - -# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2] -class FixParen(fixer_base.BaseFix): - BM_compatible = True - - PATTERN = """ - atom< ('[' | '(') - (listmaker< any - comp_for< - 'for' NAME 'in' - target=testlist_safe< any (',' any)+ [','] - > - [any] - > - > - | - testlist_gexp< any - comp_for< - 'for' NAME 'in' - target=testlist_safe< any (',' any)+ [','] - > - [any] - > - >) - (']' | ')') > - """ - - def transform(self, node, results): - target = results["target"] - - lparen = LParen() - lparen.prefix = target.prefix - target.prefix = u"" # Make it hug the parentheses - target.insert_child(0, lparen) - target.append_child(RParen()) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_print.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_print.py deleted file mode 100644 index 98786b3e..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_print.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for print. - -Change: - 'print' into 'print()' - 'print ...' into 'print(...)' - 'print ... ,' into 'print(..., end=" ")' - 'print >>x, ...' into 'print(..., file=x)' - -No changes are applied if print_function is imported from __future__ - -""" - -# Local imports -from .. import patcomp -from .. import pytree -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Name, Call, Comma, String, is_tuple - - -parend_expr = patcomp.compile_pattern( - """atom< '(' [atom|STRING|NAME] ')' >""" - ) - - -class FixPrint(fixer_base.BaseFix): - - BM_compatible = True - - PATTERN = """ - simple_stmt< any* bare='print' any* > | print_stmt - """ - - def transform(self, node, results): - assert results - - bare_print = results.get("bare") - - if bare_print: - # Special-case print all by itself - bare_print.replace(Call(Name(u"print"), [], - prefix=bare_print.prefix)) - return - assert node.children[0] == Name(u"print") - args = node.children[1:] - if len(args) == 1 and parend_expr.match(args[0]): - # We don't want to keep sticking parens around an - # already-parenthesised expression. - return - - sep = end = file = None - if args and args[-1] == Comma(): - args = args[:-1] - end = " " - if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"): - assert len(args) >= 2 - file = args[1].clone() - args = args[3:] # Strip a possible comma after the file expression - # Now synthesize a print(args, sep=..., end=..., file=...) node. 
- l_args = [arg.clone() for arg in args] - if l_args: - l_args[0].prefix = u"" - if sep is not None or end is not None or file is not None: - if sep is not None: - self.add_kwarg(l_args, u"sep", String(repr(sep))) - if end is not None: - self.add_kwarg(l_args, u"end", String(repr(end))) - if file is not None: - self.add_kwarg(l_args, u"file", file) - n_stmt = Call(Name(u"print"), l_args) - n_stmt.prefix = node.prefix - return n_stmt - - def add_kwarg(self, l_nodes, s_kwd, n_expr): - # XXX All this prefix-setting may lose comments (though rarely) - n_expr.prefix = u"" - n_argument = pytree.Node(self.syms.argument, - (Name(s_kwd), - pytree.Leaf(token.EQUAL, u"="), - n_expr)) - if l_nodes: - l_nodes.append(Comma()) - n_argument.prefix = u" " - l_nodes.append(n_argument) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raise.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raise.py deleted file mode 100644 index b958ba01..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raise.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Fixer for 'raise E, V, T' - -raise -> raise -raise E -> raise E -raise E, V -> raise E(V) -raise E, V, T -> raise E(V).with_traceback(T) -raise E, None, T -> raise E.with_traceback(T) - -raise (((E, E'), E''), E'''), V -> raise E(V) -raise "foo", V, T -> warns about string exceptions - - -CAVEATS: -1) "raise E, V" will be incorrectly translated if V is an exception - instance. The correct Python 3 idiom is - - raise E from V - - but since we can't detect instance-hood by syntax alone and since - any client code would have to be changed as well, we don't automate - this. -""" -# Author: Collin Winter - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Name, Call, Attr, ArgList, is_tuple - -class FixRaise(fixer_base.BaseFix): - - BM_compatible = True - PATTERN = """ - raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] > - """ - - def transform(self, node, results): - syms = self.syms - - exc = results["exc"].clone() - if exc.type == token.STRING: - msg = "Python 3 does not support string exceptions" - self.cannot_convert(node, msg) - return - - # Python 2 supports - # raise ((((E1, E2), E3), E4), E5), V - # as a synonym for - # raise E1, V - # Since Python 3 will not support this, we recurse down any tuple - # literals, always taking the first element. - if is_tuple(exc): - while is_tuple(exc): - # exc.children[1:-1] is the unparenthesized tuple - # exc.children[1].children[0] is the first element of the tuple - exc = exc.children[1].children[0].clone() - exc.prefix = u" " - - if "val" not in results: - # One-argument raise - new = pytree.Node(syms.raise_stmt, [Name(u"raise"), exc]) - new.prefix = node.prefix - return new - - val = results["val"].clone() - if is_tuple(val): - args = [c.clone() for c in val.children[1:-1]] - else: - val.prefix = u"" - args = [val] - - if "tb" in results: - tb = results["tb"].clone() - tb.prefix = u"" - - e = exc - # If there's a traceback and None is passed as the value, then don't - # add a call, since the user probably just wants to add a - # traceback. See issue #9661. 
- if val.type != token.NAME or val.value != u"None": - e = Call(exc, args) - with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])] - new = pytree.Node(syms.simple_stmt, [Name(u"raise")] + with_tb) - new.prefix = node.prefix - return new - else: - return pytree.Node(syms.raise_stmt, - [Name(u"raise"), Call(exc, args)], - prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raw_input.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raw_input.py deleted file mode 100644 index 3a73b818..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_raw_input.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Fixer that changes raw_input(...) into input(...).""" -# Author: Andre Roberge - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - -class FixRawInput(fixer_base.BaseFix): - - BM_compatible = True - PATTERN = """ - power< name='raw_input' trailer< '(' [any] ')' > any* > - """ - - def transform(self, node, results): - name = results["name"] - name.replace(Name(u"input", prefix=name.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_reduce.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_reduce.py deleted file mode 100644 index 6bd785c1..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_reduce.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2008 Armin Ronacher. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for reduce(). - -Makes sure reduce() is imported from the functools module if reduce is -used in that module. -""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import touch_import - - - -class FixReduce(fixer_base.BaseFix): - - BM_compatible = True - order = "pre" - - PATTERN = """ - power< 'reduce' - trailer< '(' - arglist< ( - (not(argument<any '=' any>) any ',' - not(argument<any '=' any>) any) | - (not(argument<any '=' any>) any ',' - not(argument<any '=' any>) any ',' - not(argument<any '=' any>) any) - ) > - ')' > - > - """ - - def transform(self, node, results): - touch_import(u'functools', u'reduce', node) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_renames.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_renames.py deleted file mode 100644 index 4bcce8c4..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_renames.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Fix incompatible renames - -Fixes: - * sys.maxint -> sys.maxsize -""" -# Author: Christian Heimes -# based on Collin Winter's fix_import - -# Local imports -from .. import fixer_base -from ..fixer_util import Name, attr_chain - -MAPPING = {"sys": {"maxint" : "maxsize"}, - } -LOOKUP = {} - -def alternates(members): - return "(" + "|".join(map(repr, members)) + ")" - - -def build_pattern(): - #bare = set() - for module, replace in MAPPING.items(): - for old_attr, new_attr in replace.items(): - LOOKUP[(module, old_attr)] = new_attr - #bare.add(module) - #bare.add(old_attr) - #yield """ - # import_name< 'import' (module=%r - # | dotted_as_names< any* module=%r any* >) > - # """ % (module, module) - yield """ - import_from< 'from' module_name=%r 'import' - ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > - """ % (module, old_attr, old_attr) - yield """ - power< module_name=%r trailer< '.' 
attr_name=%r > any* > - """ % (module, old_attr) - #yield """bare_name=%s""" % alternates(bare) - - -class FixRenames(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "|".join(build_pattern()) - - order = "pre" # Pre-order tree traversal - - # Don't match the node if it's within another match - def match(self, node): - match = super(FixRenames, self).match - results = match(node) - if results: - if any(match(obj) for obj in attr_chain(node, "parent")): - return False - return results - return False - - #def start_tree(self, tree, filename): - # super(FixRenames, self).start_tree(tree, filename) - # self.replace = {} - - def transform(self, node, results): - mod_name = results.get("module_name") - attr_name = results.get("attr_name") - #bare_name = results.get("bare_name") - #import_mod = results.get("module") - - if mod_name and attr_name: - new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)]) - attr_name.replace(Name(new_attr, prefix=attr_name.prefix)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_repr.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_repr.py deleted file mode 100644 index f3436564..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_repr.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that transforms `xyzzy` into repr(xyzzy).""" - -# Local imports -from .. import fixer_base -from ..fixer_util import Call, Name, parenthesize - - -class FixRepr(fixer_base.BaseFix): - - BM_compatible = True - PATTERN = """ - atom < '`' expr=any '`' > - """ - - def transform(self, node, results): - expr = results["expr"].clone() - - if expr.type == self.syms.testlist1: - expr = parenthesize(expr) - return Call(Name(u"repr"), [expr], prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_set_literal.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_set_literal.py deleted file mode 100644 index d3d38ec4..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_set_literal.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -Optional fixer to transform set() calls to set literals. -""" - -# Author: Benjamin Peterson - -from lib2to3 import fixer_base, pytree -from lib2to3.fixer_util import token, syms - - - -class FixSetLiteral(fixer_base.BaseFix): - - BM_compatible = True - explicit = True - - PATTERN = """power< 'set' trailer< '(' - (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > - | - single=any) ']' > - | - atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > - ) - ')' > > - """ - - def transform(self, node, results): - single = results.get("single") - if single: - # Make a fake listmaker - fake = pytree.Node(syms.listmaker, [single.clone()]) - single.replace(fake) - items = fake - else: - items = results["items"] - - # Build the contents of the literal - literal = [pytree.Leaf(token.LBRACE, u"{")] - literal.extend(n.clone() for n in items.children) - literal.append(pytree.Leaf(token.RBRACE, u"}")) - # Set the prefix of the right brace to that of the ')' or ']' - literal[-1].prefix = items.next_sibling.prefix - maker = pytree.Node(syms.dictsetmaker, literal) - maker.prefix = node.prefix - - # If the original was a one tuple, we need to remove the extra comma. 
- if len(maker.children) == 4: - n = maker.children[2] - n.remove() - maker.children[-1].prefix = n.prefix - - # Finally, replace the set call with our shiny new literal. - return maker diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_standarderror.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_standarderror.py deleted file mode 100644 index 6cad5111..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_standarderror.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for StandardError -> Exception.""" - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - - -class FixStandarderror(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - 'StandardError' - """ - - def transform(self, node, results): - return Name(u"Exception", prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_sys_exc.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_sys_exc.py deleted file mode 100644 index 2ecca2b5..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_sys_exc.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Fixer for sys.exc_{type, value, traceback} - -sys.exc_type -> sys.exc_info()[0] -sys.exc_value -> sys.exc_info()[1] -sys.exc_traceback -> sys.exc_info()[2] -""" - -# By Jeff Balogh and Benjamin Peterson - -# Local imports -from .. import fixer_base -from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms - -class FixSysExc(fixer_base.BaseFix): - # This order matches the ordering of sys.exc_info(). - exc_info = [u"exc_type", u"exc_value", u"exc_traceback"] - BM_compatible = True - PATTERN = """ - power< 'sys' trailer< dot='.' attribute=(%s) > > - """ % '|'.join("'%s'" % e for e in exc_info) - - def transform(self, node, results): - sys_attr = results["attribute"][0] - index = Number(self.exc_info.index(sys_attr.value)) - - call = Call(Name(u"exc_info"), prefix=sys_attr.prefix) - attr = Attr(Name(u"sys"), call) - attr[1].children[0].prefix = results["dot"].prefix - attr.append(Subscript(index)) - return Node(syms.power, attr, prefix=node.prefix) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_throw.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_throw.py deleted file mode 100644 index 1468d89a..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_throw.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Fixer for generator.throw(E, V, T). - -g.throw(E) -> g.throw(E) -g.throw(E, V) -> g.throw(E(V)) -g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)) - -g.throw("foo"[, V[, T]]) will warn about string exceptions.""" -# Author: Collin Winter - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Name, Call, ArgList, Attr, is_tuple - -class FixThrow(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< any trailer< '.' 'throw' > - trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > - > - | - power< any trailer< '.' 
'throw' > trailer< '(' exc=any ')' > > - """ - - def transform(self, node, results): - syms = self.syms - - exc = results["exc"].clone() - if exc.type is token.STRING: - self.cannot_convert(node, "Python 3 does not support string exceptions") - return - - # Leave "g.throw(E)" alone - val = results.get(u"val") - if val is None: - return - - val = val.clone() - if is_tuple(val): - args = [c.clone() for c in val.children[1:-1]] - else: - val.prefix = u"" - args = [val] - - throw_args = results["args"] - - if "tb" in results: - tb = results["tb"].clone() - tb.prefix = u"" - - e = Call(exc, args) - with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])] - throw_args.replace(pytree.Node(syms.power, with_tb)) - else: - throw_args.replace(Call(exc, args)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_tuple_params.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_tuple_params.py deleted file mode 100644 index 51b47865..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_tuple_params.py +++ /dev/null @@ -1,175 +0,0 @@ -"""Fixer for function definitions with tuple parameters. - -def func(((a, b), c), d): - ... - - -> - -def func(x, d): - ((a, b), c) = x - ... - -It will also support lambdas: - - lambda (x, y): x + y -> lambda t: t[0] + t[1] - - # The parens are a syntax error in Python 3 - lambda (x): x + y -> lambda x: x + y -""" -# Author: Collin Winter - -# Local imports -from .. import pytree -from ..pgen2 import token -from .. import fixer_base -from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms - -def is_docstring(stmt): - return isinstance(stmt, pytree.Node) and \ - stmt.children[0].type == token.STRING - -class FixTupleParams(fixer_base.BaseFix): - run_order = 4 #use a lower order since lambda is part of other - #patterns - BM_compatible = True - - PATTERN = """ - funcdef< 'def' any parameters< '(' args=any ')' > - ['->' any] ':' suite=any+ > - | - lambda= - lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > - ':' body=any - > - """ - - def transform(self, node, results): - if "lambda" in results: - return self.transform_lambda(node, results) - - new_lines = [] - suite = results["suite"] - args = results["args"] - # This is so "def foo(...): x = 5; y = 7" is handled correctly. - # TODO(cwinter): suite-cleanup - if suite[0].children[1].type == token.INDENT: - start = 2 - indent = suite[0].children[1].value - end = Newline() - else: - start = 0 - indent = u"; " - end = pytree.Leaf(token.INDENT, u"") - - # We need access to self for new_name(), and making this a method - # doesn't feel right. Closing over self and new_lines makes the - # code below cleaner. - def handle_tuple(tuple_arg, add_prefix=False): - n = Name(self.new_name()) - arg = tuple_arg.clone() - arg.prefix = u"" - stmt = Assign(arg, n.clone()) - if add_prefix: - n.prefix = u" " - tuple_arg.replace(n) - new_lines.append(pytree.Node(syms.simple_stmt, - [stmt, end.clone()])) - - if args.type == syms.tfpdef: - handle_tuple(args) - elif args.type == syms.typedargslist: - for i, arg in enumerate(args.children): - if arg.type == syms.tfpdef: - # Without add_prefix, the emitted code is correct, - # just ugly. - handle_tuple(arg, add_prefix=(i > 0)) - - if not new_lines: - return - - # This isn't strictly necessary, but it plays nicely with other fixers. 
- # TODO(cwinter) get rid of this when children becomes a smart list - for line in new_lines: - line.parent = suite[0] - - # TODO(cwinter) suite-cleanup - after = start - if start == 0: - new_lines[0].prefix = u" " - elif is_docstring(suite[0].children[start]): - new_lines[0].prefix = indent - after = start + 1 - - for line in new_lines: - line.parent = suite[0] - suite[0].children[after:after] = new_lines - for i in range(after+1, after+len(new_lines)+1): - suite[0].children[i].prefix = indent - suite[0].changed() - - def transform_lambda(self, node, results): - args = results["args"] - body = results["body"] - inner = simplify_args(results["inner"]) - - # Replace lambda ((((x)))): x with lambda x: x - if inner.type == token.NAME: - inner = inner.clone() - inner.prefix = u" " - args.replace(inner) - return - - params = find_params(args) - to_index = map_to_index(params) - tup_name = self.new_name(tuple_name(params)) - - new_param = Name(tup_name, prefix=u" ") - args.replace(new_param.clone()) - for n in body.post_order(): - if n.type == token.NAME and n.value in to_index: - subscripts = [c.clone() for c in to_index[n.value]] - new = pytree.Node(syms.power, - [new_param.clone()] + subscripts) - new.prefix = n.prefix - n.replace(new) - - -### Helper functions for transform_lambda() - -def simplify_args(node): - if node.type in (syms.vfplist, token.NAME): - return node - elif node.type == syms.vfpdef: - # These look like vfpdef< '(' x ')' > where x is NAME - # or another vfpdef instance (leading to recursion). - while node.type == syms.vfpdef: - node = node.children[1] - return node - raise RuntimeError("Received unexpected node %s" % node) - -def find_params(node): - if node.type == syms.vfpdef: - return find_params(node.children[1]) - elif node.type == token.NAME: - return node.value - return [find_params(c) for c in node.children if c.type != token.COMMA] - -def map_to_index(param_list, prefix=[], d=None): - if d is None: - d = {} - for i, obj in enumerate(param_list): - trailer = [Subscript(Number(unicode(i)))] - if isinstance(obj, list): - map_to_index(obj, trailer, d=d) - else: - d[obj] = prefix + trailer - return d - -def tuple_name(param_list): - l = [] - for obj in param_list: - if isinstance(obj, list): - l.append(tuple_name(obj)) - else: - l.append(obj) - return u"_".join(l) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_types.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_types.py deleted file mode 100644 index fc9d4959..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_types.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer for removing uses of the types module. - -These work for only the known names in the types module. The forms above -can include types. or not. ie, It is assumed the module is imported either as: - - import types - from types import ... # either * or specific types - -The import statements are not modified. - -There should be another fixer that handles at least the following constants: - - type([]) -> list - type(()) -> tuple - type('') -> str - -""" - -# Local imports -from ..pgen2 import token -from .. 
import fixer_base -from ..fixer_util import Name - -_TYPE_MAPPING = { - 'BooleanType' : 'bool', - 'BufferType' : 'memoryview', - 'ClassType' : 'type', - 'ComplexType' : 'complex', - 'DictType': 'dict', - 'DictionaryType' : 'dict', - 'EllipsisType' : 'type(Ellipsis)', - #'FileType' : 'io.IOBase', - 'FloatType': 'float', - 'IntType': 'int', - 'ListType': 'list', - 'LongType': 'int', - 'ObjectType' : 'object', - 'NoneType': 'type(None)', - 'NotImplementedType' : 'type(NotImplemented)', - 'SliceType' : 'slice', - 'StringType': 'bytes', # XXX ? - 'StringTypes' : 'str', # XXX ? - 'TupleType': 'tuple', - 'TypeType' : 'type', - 'UnicodeType': 'str', - 'XRangeType' : 'range', - } - -_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING] - -class FixTypes(fixer_base.BaseFix): - BM_compatible = True - PATTERN = '|'.join(_pats) - - def transform(self, node, results): - new_value = unicode(_TYPE_MAPPING.get(results["name"].value)) - if new_value: - return Name(new_value, prefix=node.prefix) - return None diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_unicode.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_unicode.py deleted file mode 100644 index 2d776f61..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_unicode.py +++ /dev/null @@ -1,42 +0,0 @@ -r"""Fixer for unicode. - -* Changes unicode to str and unichr to chr. - -* If "...\u..." is not unicode literal change it into "...\\u...". - -* Change u"..." into "...". - -""" - -from ..pgen2 import token -from .. import fixer_base - -_mapping = {u"unichr" : u"chr", u"unicode" : u"str"} - -class FixUnicode(fixer_base.BaseFix): - BM_compatible = True - PATTERN = "STRING | 'unicode' | 'unichr'" - - def start_tree(self, tree, filename): - super(FixUnicode, self).start_tree(tree, filename) - self.unicode_literals = 'unicode_literals' in tree.future_features - - def transform(self, node, results): - if node.type == token.NAME: - new = node.clone() - new.value = _mapping[node.value] - return new - elif node.type == token.STRING: - val = node.value - if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val: - val = ur'\\'.join([ - v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U') - for v in val.split(ur'\\') - ]) - if val[0] in u'uU': - val = val[1:] - if val == node.value: - return node - new = node.clone() - new.value = val - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_urllib.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_urllib.py deleted file mode 100644 index 34e1b270..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_urllib.py +++ /dev/null @@ -1,197 +0,0 @@ -"""Fix changes imports of urllib which are now incompatible. - This is rather similar to fix_imports, but because of the more - complex nature of the fixing for urllib, it has its own fixer. 
-""" -# Author: Nick Edds - -# Local imports -from lib2to3.fixes.fix_imports import alternates, FixImports -from lib2to3 import fixer_base -from lib2to3.fixer_util import (Name, Comma, FromImport, Newline, - find_indentation, Node, syms) - -MAPPING = {"urllib": [ - ("urllib.request", - ["URLopener", "FancyURLopener", "urlretrieve", - "_urlopener", "urlopen", "urlcleanup", - "pathname2url", "url2pathname"]), - ("urllib.parse", - ["quote", "quote_plus", "unquote", "unquote_plus", - "urlencode", "splitattr", "splithost", "splitnport", - "splitpasswd", "splitport", "splitquery", "splittag", - "splittype", "splituser", "splitvalue", ]), - ("urllib.error", - ["ContentTooShortError"])], - "urllib2" : [ - ("urllib.request", - ["urlopen", "install_opener", "build_opener", - "Request", "OpenerDirector", "BaseHandler", - "HTTPDefaultErrorHandler", "HTTPRedirectHandler", - "HTTPCookieProcessor", "ProxyHandler", - "HTTPPasswordMgr", - "HTTPPasswordMgrWithDefaultRealm", - "AbstractBasicAuthHandler", - "HTTPBasicAuthHandler", "ProxyBasicAuthHandler", - "AbstractDigestAuthHandler", - "HTTPDigestAuthHandler", "ProxyDigestAuthHandler", - "HTTPHandler", "HTTPSHandler", "FileHandler", - "FTPHandler", "CacheFTPHandler", - "UnknownHandler"]), - ("urllib.error", - ["URLError", "HTTPError"]), - ] -} - -# Duplicate the url parsing functions for urllib2. -MAPPING["urllib2"].append(MAPPING["urllib"][1]) - - -def build_pattern(): - bare = set() - for old_module, changes in MAPPING.items(): - for change in changes: - new_module, members = change - members = alternates(members) - yield """import_name< 'import' (module=%r - | dotted_as_names< any* module=%r any* >) > - """ % (old_module, old_module) - yield """import_from< 'from' mod_member=%r 'import' - ( member=%s | import_as_name< member=%s 'as' any > | - import_as_names< members=any* >) > - """ % (old_module, members, members) - yield """import_from< 'from' module_star=%r 'import' star='*' > - """ % old_module - yield """import_name< 'import' - dotted_as_name< module_as=%r 'as' any > > - """ % old_module - # bare_with_attr has a special significance for FixImports.match(). - yield """power< bare_with_attr=%r trailer< '.' member=%s > any* > - """ % (old_module, members) - - -class FixUrllib(FixImports): - - def build_pattern(self): - return "|".join(build_pattern()) - - def transform_import(self, node, results): - """Transform for the basic import case. Replaces the old - import name with a comma separated list of its - replacements. - """ - import_mod = results.get("module") - pref = import_mod.prefix - - names = [] - - # create a Node list of the replacement modules - for name in MAPPING[import_mod.value][:-1]: - names.extend([Name(name[0], prefix=pref), Comma()]) - names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref)) - import_mod.replace(names) - - def transform_member(self, node, results): - """Transform for imports of specific module elements. Replaces - the module to be imported from with the appropriate new - module. 
- """ - mod_member = results.get("mod_member") - pref = mod_member.prefix - member = results.get("member") - - # Simple case with only a single member being imported - if member: - # this may be a list of length one, or just a node - if isinstance(member, list): - member = member[0] - new_name = None - for change in MAPPING[mod_member.value]: - if member.value in change[1]: - new_name = change[0] - break - if new_name: - mod_member.replace(Name(new_name, prefix=pref)) - else: - self.cannot_convert(node, "This is an invalid module element") - - # Multiple members being imported - else: - # a dictionary for replacements, order matters - modules = [] - mod_dict = {} - members = results["members"] - for member in members: - # we only care about the actual members - if member.type == syms.import_as_name: - as_name = member.children[2].value - member_name = member.children[0].value - else: - member_name = member.value - as_name = None - if member_name != u",": - for change in MAPPING[mod_member.value]: - if member_name in change[1]: - if change[0] not in mod_dict: - modules.append(change[0]) - mod_dict.setdefault(change[0], []).append(member) - - new_nodes = [] - indentation = find_indentation(node) - first = True - def handle_name(name, prefix): - if name.type == syms.import_as_name: - kids = [Name(name.children[0].value, prefix=prefix), - name.children[1].clone(), - name.children[2].clone()] - return [Node(syms.import_as_name, kids)] - return [Name(name.value, prefix=prefix)] - for module in modules: - elts = mod_dict[module] - names = [] - for elt in elts[:-1]: - names.extend(handle_name(elt, pref)) - names.append(Comma()) - names.extend(handle_name(elts[-1], pref)) - new = FromImport(module, names) - if not first or node.parent.prefix.endswith(indentation): - new.prefix = indentation - new_nodes.append(new) - first = False - if new_nodes: - nodes = [] - for new_node in new_nodes[:-1]: - nodes.extend([new_node, Newline()]) - nodes.append(new_nodes[-1]) - node.replace(nodes) - else: - self.cannot_convert(node, "All module elements are invalid") - - def transform_dot(self, node, results): - """Transform for calls to module members in code.""" - module_dot = results.get("bare_with_attr") - member = results.get("member") - new_name = None - if isinstance(member, list): - member = member[0] - for change in MAPPING[module_dot.value]: - if member.value in change[1]: - new_name = change[0] - break - if new_name: - module_dot.replace(Name(new_name, - prefix=module_dot.prefix)) - else: - self.cannot_convert(node, "This is an invalid module element") - - def transform(self, node, results): - if results.get("module"): - self.transform_import(node, results) - elif results.get("mod_member"): - self.transform_member(node, results) - elif results.get("bare_with_attr"): - self.transform_dot(node, results) - # Renaming and star imports are not supported for these modules. - elif results.get("module_star"): - self.cannot_convert(node, "Cannot handle star imports.") - elif results.get("module_as"): - self.cannot_convert(node, "This module is now multiple modules") diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ws_comma.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ws_comma.py deleted file mode 100644 index 37ff6244..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_ws_comma.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Fixer that changes 'a ,b' into 'a, b'. 
- -This also changes '{a :b}' into '{a: b}', but does not touch other -uses of colons. It does not touch other uses of whitespace. - -""" - -from .. import pytree -from ..pgen2 import token -from .. import fixer_base - -class FixWsComma(fixer_base.BaseFix): - - explicit = True # The user must ask for this fixers - - PATTERN = """ - any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> - """ - - COMMA = pytree.Leaf(token.COMMA, u",") - COLON = pytree.Leaf(token.COLON, u":") - SEPS = (COMMA, COLON) - - def transform(self, node, results): - new = node.clone() - comma = False - for child in new.children: - if child in self.SEPS: - prefix = child.prefix - if prefix.isspace() and u"\n" not in prefix: - child.prefix = u"" - comma = True - else: - if comma: - prefix = child.prefix - if not prefix: - child.prefix = u" " - comma = False - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xrange.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xrange.py deleted file mode 100644 index f1436724..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xrange.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Fixer that changes xrange(...) into range(...).""" - -# Local imports -from .. import fixer_base -from ..fixer_util import Name, Call, consuming_calls -from .. import patcomp - - -class FixXrange(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< - (name='range'|name='xrange') trailer< '(' args=any ')' > - rest=any* > - """ - - def start_tree(self, tree, filename): - super(FixXrange, self).start_tree(tree, filename) - self.transformed_xranges = set() - - def finish_tree(self, tree, filename): - self.transformed_xranges = None - - def transform(self, node, results): - name = results["name"] - if name.value == u"xrange": - return self.transform_xrange(node, results) - elif name.value == u"range": - return self.transform_range(node, results) - else: - raise ValueError(repr(name)) - - def transform_xrange(self, node, results): - name = results["name"] - name.replace(Name(u"range", prefix=name.prefix)) - # This prevents the new range call from being wrapped in a list later. - self.transformed_xranges.add(id(node)) - - def transform_range(self, node, results): - if (id(node) not in self.transformed_xranges and - not self.in_special_context(node)): - range_call = Call(Name(u"range"), [results["args"].clone()]) - # Encase the range call in list(). - list_call = Call(Name(u"list"), [range_call], - prefix=node.prefix) - # Put things that were after the range() call after the list call. - for n in results["rest"]: - list_call.append_child(n) - return list_call - - P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" - p1 = patcomp.compile_pattern(P1) - - P2 = """for_stmt< 'for' any 'in' node=any ':' any* > - | comp_for< 'for' any 'in' node=any any* > - | comparison< any 'in' node=any any*> - """ - p2 = patcomp.compile_pattern(P2) - - def in_special_context(self, node): - if node.parent is None: - return False - results = {} - if (node.parent.parent is not None and - self.p1.match(node.parent.parent, results) and - results["node"] is node): - # list(d.keys()) -> list(d.keys()), etc. - return results["func"].value in consuming_calls - # for ... in d.iterkeys() -> for ... in d.keys(), etc. 
- return self.p2.match(node.parent, results) and results["node"] is node diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xreadlines.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xreadlines.py deleted file mode 100644 index f50b9a27..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_xreadlines.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Fix "for x in f.xreadlines()" -> "for x in f". - -This fixer will also convert g(f.xreadlines) into g(f.__iter__).""" -# Author: Collin Winter - -# Local imports -from .. import fixer_base -from ..fixer_util import Name - - -class FixXreadlines(fixer_base.BaseFix): - BM_compatible = True - PATTERN = """ - power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > - | - power< any+ trailer< '.' no_call='xreadlines' > > - """ - - def transform(self, node, results): - no_call = results.get("no_call") - - if no_call: - no_call.replace(Name(u"__iter__", prefix=no_call.prefix)) - else: - node.replace([x.clone() for x in results["call"]]) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_zip.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_zip.py deleted file mode 100644 index c5d7b66d..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/fixes/fix_zip.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) -unless there exists a 'from future_builtins import zip' statement in the -top-level namespace. - -We avoid the transformation if the zip() call is directly contained in -iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. -""" - -# Local imports -from .. import fixer_base -from ..fixer_util import Name, Call, in_special_context - -class FixZip(fixer_base.ConditionalFix): - - BM_compatible = True - PATTERN = """ - power< 'zip' args=trailer< '(' [any] ')' > - > - """ - - skip_on = "future_builtins.zip" - - def transform(self, node, results): - if self.should_skip(node): - return - - if in_special_context(node): - return None - - new = node.clone() - new.prefix = u"" - new = Call(Name(u"list"), [new]) - new.prefix = node.prefix - return new diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/main.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/main.py deleted file mode 100644 index ad0625e5..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/main.py +++ /dev/null @@ -1,269 +0,0 @@ -""" -Main program for 2to3. -""" - -from __future__ import with_statement - -import sys -import os -import difflib -import logging -import shutil -import optparse - -from . import refactor - - -def diff_texts(a, b, filename): - """Return a unified diff of two strings.""" - a = a.splitlines() - b = b.splitlines() - return difflib.unified_diff(a, b, filename, filename, - "(original)", "(refactored)", - lineterm="") - - -class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): - """ - A refactoring tool that can avoid overwriting its input files. - Prints output to stdout. - - Output files can optionally be written to a different directory and or - have an extra file suffix appended to their name for use in situations - where you do not want to replace the input files. 
- """ - - def __init__(self, fixers, options, explicit, nobackups, show_diffs, - input_base_dir='', output_dir='', append_suffix=''): - """ - Args: - fixers: A list of fixers to import. - options: A dict with RefactoringTool configuration. - explicit: A list of fixers to run even if they are explicit. - nobackups: If true no backup '.bak' files will be created for those - files that are being refactored. - show_diffs: Should diffs of the refactoring be printed to stdout? - input_base_dir: The base directory for all input files. This class - will strip this path prefix off of filenames before substituting - it with output_dir. Only meaningful if output_dir is supplied. - All files processed by refactor() must start with this path. - output_dir: If supplied, all converted files will be written into - this directory tree instead of input_base_dir. - append_suffix: If supplied, all files output by this tool will have - this appended to their filename. Useful for changing .py to - .py3 for example by passing append_suffix='3'. - """ - self.nobackups = nobackups - self.show_diffs = show_diffs - if input_base_dir and not input_base_dir.endswith(os.sep): - input_base_dir += os.sep - self._input_base_dir = input_base_dir - self._output_dir = output_dir - self._append_suffix = append_suffix - super(StdoutRefactoringTool, self).__init__(fixers, options, explicit) - - def log_error(self, msg, *args, **kwargs): - self.errors.append((msg, args, kwargs)) - self.logger.error(msg, *args, **kwargs) - - def write_file(self, new_text, filename, old_text, encoding): - orig_filename = filename - if self._output_dir: - if filename.startswith(self._input_base_dir): - filename = os.path.join(self._output_dir, - filename[len(self._input_base_dir):]) - else: - raise ValueError('filename %s does not start with the ' - 'input_base_dir %s' % ( - filename, self._input_base_dir)) - if self._append_suffix: - filename += self._append_suffix - if orig_filename != filename: - output_dir = os.path.dirname(filename) - if not os.path.isdir(output_dir): - os.makedirs(output_dir) - self.log_message('Writing converted %s to %s.', orig_filename, - filename) - if not self.nobackups: - # Make backup - backup = filename + ".bak" - if os.path.lexists(backup): - try: - os.remove(backup) - except os.error, err: - self.log_message("Can't remove backup %s", backup) - try: - os.rename(filename, backup) - except os.error, err: - self.log_message("Can't rename %s to %s", filename, backup) - # Actually write the new file - write = super(StdoutRefactoringTool, self).write_file - write(new_text, filename, old_text, encoding) - if not self.nobackups: - shutil.copymode(backup, filename) - if orig_filename != filename: - # Preserve the file mode in the new output directory. - shutil.copymode(orig_filename, filename) - - def print_output(self, old, new, filename, equal): - if equal: - self.log_message("No changes to %s", filename) - else: - self.log_message("Refactored %s", filename) - if self.show_diffs: - diff_lines = diff_texts(old, new, filename) - try: - if self.output_lock is not None: - with self.output_lock: - for line in diff_lines: - print line - sys.stdout.flush() - else: - for line in diff_lines: - print line - except UnicodeEncodeError: - warn("couldn't encode %s's diff for your terminal" % - (filename,)) - return - - -def warn(msg): - print >> sys.stderr, "WARNING: %s" % (msg,) - - -def main(fixer_pkg, args=None): - """Main program. - - Args: - fixer_pkg: the name of a package where the fixers are located. 
- args: optional; a list of command line arguments. If omitted, - sys.argv[1:] is used. - - Returns a suggested exit status (0, 1, 2). - """ - # Set up option parser - parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") - parser.add_option("-d", "--doctests_only", action="store_true", - help="Fix up doctests only") - parser.add_option("-f", "--fix", action="append", default=[], - help="Each FIX specifies a transformation; default: all") - parser.add_option("-j", "--processes", action="store", default=1, - type="int", help="Run 2to3 concurrently") - parser.add_option("-x", "--nofix", action="append", default=[], - help="Prevent a transformation from being run") - parser.add_option("-l", "--list-fixes", action="store_true", - help="List available transformations") - parser.add_option("-p", "--print-function", action="store_true", - help="Modify the grammar so that print() is a function") - parser.add_option("-v", "--verbose", action="store_true", - help="More verbose logging") - parser.add_option("--no-diffs", action="store_true", - help="Don't show diffs of the refactoring") - parser.add_option("-w", "--write", action="store_true", - help="Write back modified files") - parser.add_option("-n", "--nobackups", action="store_true", default=False, - help="Don't write backups for modified files") - parser.add_option("-o", "--output-dir", action="store", type="str", - default="", help="Put output files in this directory " - "instead of overwriting the input files. Requires -n.") - parser.add_option("-W", "--write-unchanged-files", action="store_true", - help="Also write files even if no changes were required" - " (useful with --output-dir); implies -w.") - parser.add_option("--add-suffix", action="store", type="str", default="", - help="Append this string to all output filenames." - " Requires -n if non-empty. " - "ex: --add-suffix='3' will generate .py3 files.") - - # Parse command line arguments - refactor_stdin = False - flags = {} - options, args = parser.parse_args(args) - if options.write_unchanged_files: - flags["write_unchanged_files"] = True - if not options.write: - warn("--write-unchanged-files/-W implies -w.") - options.write = True - # If we allowed these, the original files would be renamed to backup names - # but not replaced. - if options.output_dir and not options.nobackups: - parser.error("Can't use --output-dir/-o without -n.") - if options.add_suffix and not options.nobackups: - parser.error("Can't use --add-suffix without -n.") - - if not options.write and options.no_diffs: - warn("not writing files and not printing diffs; that's not very useful") - if not options.write and options.nobackups: - parser.error("Can't use -n without -w") - if options.list_fixes: - print "Available transformations for the -f/--fix option:" - for fixname in refactor.get_all_fix_names(fixer_pkg): - print fixname - if not args: - return 0 - if not args: - print >> sys.stderr, "At least one file or directory argument required." - print >> sys.stderr, "Use --help to show usage." - return 2 - if "-" in args: - refactor_stdin = True - if options.write: - print >> sys.stderr, "Can't write to stdin." 
- return 2 - if options.print_function: - flags["print_function"] = True - - # Set up logging handler - level = logging.DEBUG if options.verbose else logging.INFO - logging.basicConfig(format='%(name)s: %(message)s', level=level) - logger = logging.getLogger('lib2to3.main') - - # Initialize the refactoring tool - avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) - unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) - explicit = set() - if options.fix: - all_present = False - for fix in options.fix: - if fix == "all": - all_present = True - else: - explicit.add(fixer_pkg + ".fix_" + fix) - requested = avail_fixes.union(explicit) if all_present else explicit - else: - requested = avail_fixes.union(explicit) - fixer_names = requested.difference(unwanted_fixes) - input_base_dir = os.path.commonprefix(args) - if (input_base_dir and not input_base_dir.endswith(os.sep) - and not os.path.isdir(input_base_dir)): - # One or more similar names were passed, their directory is the base. - # os.path.commonprefix() is ignorant of path elements, this corrects - # for that weird API. - input_base_dir = os.path.dirname(input_base_dir) - if options.output_dir: - input_base_dir = input_base_dir.rstrip(os.sep) - logger.info('Output in %r will mirror the input directory %r layout.', - options.output_dir, input_base_dir) - rt = StdoutRefactoringTool( - sorted(fixer_names), flags, sorted(explicit), - options.nobackups, not options.no_diffs, - input_base_dir=input_base_dir, - output_dir=options.output_dir, - append_suffix=options.add_suffix) - - # Refactor all files and directories passed as arguments - if not rt.errors: - if refactor_stdin: - rt.refactor_stdin() - else: - try: - rt.refactor(args, options.write, options.doctests_only, - options.processes) - except refactor.MultiprocessingUnsupported: - assert options.processes > 1 - print >> sys.stderr, "Sorry, -j isn't " \ - "supported on this platform." - return 1 - rt.summarize() - - # Return error status (0 if rt.errors is zero) - return int(bool(rt.errors)) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/patcomp.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/patcomp.py deleted file mode 100644 index 093e5f9f..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/patcomp.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Pattern compiler. - -The grammer is taken from PatternGrammar.txt. - -The compiler compiles a pattern to a pytree.*Pattern instance. -""" - -__author__ = "Guido van Rossum <guido@python.org>" - -# Python imports -import os -import StringIO - -# Fairly local imports -from .pgen2 import driver, literals, token, tokenize, parse, grammar - -# Really local imports -from . import pytree -from . import pygram - -# The pattern grammar file -_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), - "PatternGrammar.txt") - - -class PatternSyntaxError(Exception): - pass - - -def tokenize_wrapper(input): - """Tokenizes a string suppressing significant whitespace.""" - skip = set((token.NEWLINE, token.INDENT, token.DEDENT)) - tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline) - for quintuple in tokens: - type, value, start, end, line_text = quintuple - if type not in skip: - yield quintuple - - -class PatternCompiler(object): - - def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE): - """Initializer. 
- - Takes an optional alternative filename for the pattern grammar. - """ - self.grammar = driver.load_grammar(grammar_file) - self.syms = pygram.Symbols(self.grammar) - self.pygrammar = pygram.python_grammar - self.pysyms = pygram.python_symbols - self.driver = driver.Driver(self.grammar, convert=pattern_convert) - - def compile_pattern(self, input, debug=False, with_tree=False): - """Compiles a pattern string to a nested pytree.*Pattern object.""" - tokens = tokenize_wrapper(input) - try: - root = self.driver.parse_tokens(tokens, debug=debug) - except parse.ParseError as e: - raise PatternSyntaxError(str(e)) - if with_tree: - return self.compile_node(root), root - else: - return self.compile_node(root) - - def compile_node(self, node): - """Compiles a node, recursively. - - This is one big switch on the node type. - """ - # XXX Optimize certain Wildcard-containing-Wildcard patterns - # that can be merged - if node.type == self.syms.Matcher: - node = node.children[0] # Avoid unneeded recursion - - if node.type == self.syms.Alternatives: - # Skip the odd children since they are just '|' tokens - alts = [self.compile_node(ch) for ch in node.children[::2]] - if len(alts) == 1: - return alts[0] - p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) - return p.optimize() - - if node.type == self.syms.Alternative: - units = [self.compile_node(ch) for ch in node.children] - if len(units) == 1: - return units[0] - p = pytree.WildcardPattern([units], min=1, max=1) - return p.optimize() - - if node.type == self.syms.NegatedUnit: - pattern = self.compile_basic(node.children[1:]) - p = pytree.NegatedPattern(pattern) - return p.optimize() - - assert node.type == self.syms.Unit - - name = None - nodes = node.children - if len(nodes) >= 3 and nodes[1].type == token.EQUAL: - name = nodes[0].value - nodes = nodes[2:] - repeat = None - if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: - repeat = nodes[-1] - nodes = nodes[:-1] - - # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] - pattern = self.compile_basic(nodes, repeat) - - if repeat is not None: - assert repeat.type == self.syms.Repeater - children = repeat.children - child = children[0] - if child.type == token.STAR: - min = 0 - max = pytree.HUGE - elif child.type == token.PLUS: - min = 1 - max = pytree.HUGE - elif child.type == token.LBRACE: - assert children[-1].type == token.RBRACE - assert len(children) in (3, 5) - min = max = self.get_int(children[1]) - if len(children) == 5: - max = self.get_int(children[3]) - else: - assert False - if min != 1 or max != 1: - pattern = pattern.optimize() - pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) - - if name is not None: - pattern.name = name - return pattern.optimize() - - def compile_basic(self, nodes, repeat=None): - # Compile STRING | NAME [Details] | (...) | [...] 
- assert len(nodes) >= 1 - node = nodes[0] - if node.type == token.STRING: - value = unicode(literals.evalString(node.value)) - return pytree.LeafPattern(_type_of_literal(value), value) - elif node.type == token.NAME: - value = node.value - if value.isupper(): - if value not in TOKEN_MAP: - raise PatternSyntaxError("Invalid token: %r" % value) - if nodes[1:]: - raise PatternSyntaxError("Can't have details for token") - return pytree.LeafPattern(TOKEN_MAP[value]) - else: - if value == "any": - type = None - elif not value.startswith("_"): - type = getattr(self.pysyms, value, None) - if type is None: - raise PatternSyntaxError("Invalid symbol: %r" % value) - if nodes[1:]: # Details present - content = [self.compile_node(nodes[1].children[1])] - else: - content = None - return pytree.NodePattern(type, content) - elif node.value == "(": - return self.compile_node(nodes[1]) - elif node.value == "[": - assert repeat is None - subpattern = self.compile_node(nodes[1]) - return pytree.WildcardPattern([[subpattern]], min=0, max=1) - assert False, node - - def get_int(self, node): - assert node.type == token.NUMBER - return int(node.value) - - -# Map named tokens to the type value for a LeafPattern -TOKEN_MAP = {"NAME": token.NAME, - "STRING": token.STRING, - "NUMBER": token.NUMBER, - "TOKEN": None} - - -def _type_of_literal(value): - if value[0].isalpha(): - return token.NAME - elif value in grammar.opmap: - return grammar.opmap[value] - else: - return None - - -def pattern_convert(grammar, raw_node_info): - """Converts raw node information to a Node or Leaf instance.""" - type, value, context, children = raw_node_info - if children or type in grammar.number2symbol: - return pytree.Node(type, children, context=context) - else: - return pytree.Leaf(type, value, context=context) - - -def compile_pattern(pattern): - return PatternCompiler().compile_pattern(pattern) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/__init__.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/__init__.py deleted file mode 100644 index af390484..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""The pgen2 package.""" diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/conv.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/conv.py deleted file mode 100644 index 28fbb0b9..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/conv.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Convert graminit.[ch] spit out by pgen to Python code. - -Pgen is the Python parser generator. It is useful to quickly create a -parser from a grammar file in Python's grammar notation. But I don't -want my parsers to be written in C (yet), so I'm translating the -parsing tables to Python data structures and writing a Python parse -engine. - -Note that the token numbers are constants determined by the standard -Python tokenizer. The standard token module defines these numbers and -their names (the names are not used much). The token numbers are -hardcoded into the Python tokenizer and into pgen. A Python -implementation of the Python tokenizer is also available, in the -standard tokenize module. 
- -On the other hand, symbol numbers (representing the grammar's -non-terminals) are assigned by pgen based on the actual grammar -input. - -Note: this module is pretty much obsolete; the pgen module generates -equivalent grammar tables directly from the Grammar.txt input file -without having to invoke the Python pgen C program. - -""" - -# Python imports -import re - -# Local imports -from pgen2 import grammar, token - - -class Converter(grammar.Grammar): - """Grammar subclass that reads classic pgen output files. - - The run() method reads the tables as produced by the pgen parser - generator, typically contained in two C files, graminit.h and - graminit.c. The other methods are for internal use only. - - See the base class for more documentation. - - """ - - def run(self, graminit_h, graminit_c): - """Load the grammar tables from the text files written by pgen.""" - self.parse_graminit_h(graminit_h) - self.parse_graminit_c(graminit_c) - self.finish_off() - - def parse_graminit_h(self, filename): - """Parse the .h file written by pgen. (Internal) - - This file is a sequence of #define statements defining the - nonterminals of the grammar as numbers. We build two tables - mapping the numbers to names and back. - - """ - try: - f = open(filename) - except IOError, err: - print "Can't open %s: %s" % (filename, err) - return False - self.symbol2number = {} - self.number2symbol = {} - lineno = 0 - for line in f: - lineno += 1 - mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line) - if not mo and line.strip(): - print "%s(%s): can't parse %s" % (filename, lineno, - line.strip()) - else: - symbol, number = mo.groups() - number = int(number) - assert symbol not in self.symbol2number - assert number not in self.number2symbol - self.symbol2number[symbol] = number - self.number2symbol[number] = symbol - return True - - def parse_graminit_c(self, filename): - """Parse the .c file written by pgen. (Internal) - - The file looks as follows. The first two lines are always this: - - #include "pgenheaders.h" - #include "grammar.h" - - After that come four blocks: - - 1) one or more state definitions - 2) a table defining dfas - 3) a table defining labels - 4) a struct defining the grammar - - A state definition has the following form: - - one or more arc arrays, each of the form: - static arc arcs__[] = { - {, }, - ... - }; - - followed by a state array, of the form: - static state states_[] = { - {, arcs__}, - ... - }; - - """ - try: - f = open(filename) - except IOError, err: - print "Can't open %s: %s" % (filename, err) - return False - # The code below essentially uses f's iterator-ness! 
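# --- Editor's annotation (not part of the original conv.py) ---
# Given the file format described in the docstring above, a header line such
# as
#     static arc arcs_0_1[3] = {
# matches the regex below with n=0, m=1, k=3, after which k lines of
# "{i, j}," pairs are read into the arcs list for state (0, 1).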
- lineno = 0 - - # Expect the two #include lines - lineno, line = lineno+1, f.next() - assert line == '#include "pgenheaders.h"\n', (lineno, line) - lineno, line = lineno+1, f.next() - assert line == '#include "grammar.h"\n', (lineno, line) - - # Parse the state definitions - lineno, line = lineno+1, f.next() - allarcs = {} - states = [] - while line.startswith("static arc "): - while line.startswith("static arc "): - mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", - line) - assert mo, (lineno, line) - n, m, k = map(int, mo.groups()) - arcs = [] - for _ in range(k): - lineno, line = lineno+1, f.next() - mo = re.match(r"\s+{(\d+), (\d+)},$", line) - assert mo, (lineno, line) - i, j = map(int, mo.groups()) - arcs.append((i, j)) - lineno, line = lineno+1, f.next() - assert line == "};\n", (lineno, line) - allarcs[(n, m)] = arcs - lineno, line = lineno+1, f.next() - mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line) - assert mo, (lineno, line) - s, t = map(int, mo.groups()) - assert s == len(states), (lineno, line) - state = [] - for _ in range(t): - lineno, line = lineno+1, f.next() - mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line) - assert mo, (lineno, line) - k, n, m = map(int, mo.groups()) - arcs = allarcs[n, m] - assert k == len(arcs), (lineno, line) - state.append(arcs) - states.append(state) - lineno, line = lineno+1, f.next() - assert line == "};\n", (lineno, line) - lineno, line = lineno+1, f.next() - self.states = states - - # Parse the dfas - dfas = {} - mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line) - assert mo, (lineno, line) - ndfas = int(mo.group(1)) - for i in range(ndfas): - lineno, line = lineno+1, f.next() - mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', - line) - assert mo, (lineno, line) - symbol = mo.group(2) - number, x, y, z = map(int, mo.group(1, 3, 4, 5)) - assert self.symbol2number[symbol] == number, (lineno, line) - assert self.number2symbol[number] == symbol, (lineno, line) - assert x == 0, (lineno, line) - state = states[z] - assert y == len(state), (lineno, line) - lineno, line = lineno+1, f.next() - mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line) - assert mo, (lineno, line) - first = {} - rawbitset = eval(mo.group(1)) - for i, c in enumerate(rawbitset): - byte = ord(c) - for j in range(8): - if byte & (1<= os.path.getmtime(b) - - -def main(*args): - """Main program, when run as a script: produce grammar pickle files. - - Calls load_grammar for each argument, a path to a grammar text file. - """ - if not args: - args = sys.argv[1:] - logging.basicConfig(level=logging.INFO, stream=sys.stdout, - format='%(message)s') - for gt in args: - load_grammar(gt, save=True, force=True) - return True - -if __name__ == "__main__": - sys.exit(int(not main())) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/grammar.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/grammar.py deleted file mode 100644 index 1aa5c432..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/grammar.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""This module defines the data structures used to represent a grammar. - -These are a bit arcane because they are derived from the data -structures used by Python's 'pgen' parser generator. 
- -There's also a table here mapping operators to their names in the -token module; the Python tokenize module reports all operators as the -fallback token code OP, but the parser needs the actual token code. - -""" - -# Python imports -import pickle - -# Local imports -from . import token, tokenize - - -class Grammar(object): - """Pgen parsing tables conversion class. - - Once initialized, this class supplies the grammar tables for the - parsing engine implemented by parse.py. The parsing engine - accesses the instance variables directly. The class here does not - provide initialization of the tables; several subclasses exist to - do this (see the conv and pgen modules). - - The load() method reads the tables from a pickle file, which is - much faster than the other ways offered by subclasses. The pickle - file is written by calling dump() (after loading the grammar - tables using a subclass). The report() method prints a readable - representation of the tables to stdout, for debugging. - - The instance variables are as follows: - - symbol2number -- a dict mapping symbol names to numbers. Symbol - numbers are always 256 or higher, to distinguish - them from token numbers, which are between 0 and - 255 (inclusive). - - number2symbol -- a dict mapping numbers to symbol names; - these two are each other's inverse. - - states -- a list of DFAs, where each DFA is a list of - states, each state is a list of arcs, and each - arc is a (i, j) pair where i is a label and j is - a state number. The DFA number is the index into - this list. (This name is slightly confusing.) - Final states are represented by a special arc of - the form (0, j) where j is its own state number. - - dfas -- a dict mapping symbol numbers to (DFA, first) - pairs, where DFA is an item from the states list - above, and first is a set of tokens that can - begin this grammar rule (represented by a dict - whose values are always 1). - - labels -- a list of (x, y) pairs where x is either a token - number or a symbol number, and y is either None - or a string; the strings are keywords. The label - number is the index in this list; label numbers - are used to mark state transitions (arcs) in the - DFAs. - - start -- the number of the grammar's start symbol. - - keywords -- a dict mapping keyword strings to arc labels. - - tokens -- a dict mapping token numbers to arc labels. - - """ - - def __init__(self): - self.symbol2number = {} - self.number2symbol = {} - self.states = [] - self.dfas = {} - self.labels = [(0, "EMPTY")] - self.keywords = {} - self.tokens = {} - self.symbol2label = {} - self.start = 256 - - def dump(self, filename): - """Dump the grammar tables to a pickle file.""" - f = open(filename, "wb") - pickle.dump(self.__dict__, f, 2) - f.close() - - def load(self, filename): - """Load the grammar tables from a pickle file.""" - f = open(filename, "rb") - d = pickle.load(f) - f.close() - self.__dict__.update(d) - - def copy(self): - """ - Copy the grammar. 
- """ - new = self.__class__() - for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords", - "tokens", "symbol2label"): - setattr(new, dict_attr, getattr(self, dict_attr).copy()) - new.labels = self.labels[:] - new.states = self.states[:] - new.start = self.start - return new - - def report(self): - """Dump the grammar tables to standard output, for debugging.""" - from pprint import pprint - print "s2n" - pprint(self.symbol2number) - print "n2s" - pprint(self.number2symbol) - print "states" - pprint(self.states) - print "dfas" - pprint(self.dfas) - print "labels" - pprint(self.labels) - print "start", self.start - - -# Map from operator to number (since tokenize doesn't do this) - -opmap_raw = """ -( LPAR -) RPAR -[ LSQB -] RSQB -: COLON -, COMMA -; SEMI -+ PLUS -- MINUS -* STAR -/ SLASH -| VBAR -& AMPER -< LESS -> GREATER -= EQUAL -. DOT -% PERCENT -` BACKQUOTE -{ LBRACE -} RBRACE -@ AT -== EQEQUAL -!= NOTEQUAL -<> NOTEQUAL -<= LESSEQUAL ->= GREATEREQUAL -~ TILDE -^ CIRCUMFLEX -<< LEFTSHIFT ->> RIGHTSHIFT -** DOUBLESTAR -+= PLUSEQUAL --= MINEQUAL -*= STAREQUAL -/= SLASHEQUAL -%= PERCENTEQUAL -&= AMPEREQUAL -|= VBAREQUAL -^= CIRCUMFLEXEQUAL -<<= LEFTSHIFTEQUAL ->>= RIGHTSHIFTEQUAL -**= DOUBLESTAREQUAL -// DOUBLESLASH -//= DOUBLESLASHEQUAL --> RARROW -""" - -opmap = {} -for line in opmap_raw.splitlines(): - if line: - op, name = line.split() - opmap[op] = getattr(token, name) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/literals.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/literals.py deleted file mode 100644 index 0b3948a5..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/literals.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Safely evaluate Python string literals without using eval().""" - -import re - -simple_escapes = {"a": "\a", - "b": "\b", - "f": "\f", - "n": "\n", - "r": "\r", - "t": "\t", - "v": "\v", - "'": "'", - '"': '"', - "\\": "\\"} - -def escape(m): - all, tail = m.group(0, 1) - assert all.startswith("\\") - esc = simple_escapes.get(tail) - if esc is not None: - return esc - if tail.startswith("x"): - hexes = tail[1:] - if len(hexes) < 2: - raise ValueError("invalid hex string escape ('\\%s')" % tail) - try: - i = int(hexes, 16) - except ValueError: - raise ValueError("invalid hex string escape ('\\%s')" % tail) - else: - try: - i = int(tail, 8) - except ValueError: - raise ValueError("invalid octal string escape ('\\%s')" % tail) - return chr(i) - -def evalString(s): - assert s.startswith("'") or s.startswith('"'), repr(s[:1]) - q = s[0] - if s[:3] == q*3: - q = q*3 - assert s.endswith(q), repr(s[-len(q):]) - assert len(s) >= 2*len(q) - s = s[len(q):-len(q)] - return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) - -def test(): - for i in range(256): - c = chr(i) - s = repr(c) - e = evalString(s) - if e != c: - print i, c, s, e - - -if __name__ == "__main__": - test() diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/parse.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/parse.py deleted file mode 100644 index 6bebdbba..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/parse.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. 
- -"""Parser engine for the grammar tables generated by pgen. - -The grammar table must be loaded first. - -See Parser/parser.c in the Python distribution for additional info on -how this parsing engine works. - -""" - -# Local imports -from . import token - -class ParseError(Exception): - """Exception to signal the parser is stuck.""" - - def __init__(self, msg, type, value, context): - Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % - (msg, type, value, context)) - self.msg = msg - self.type = type - self.value = value - self.context = context - -class Parser(object): - """Parser engine. - - The proper usage sequence is: - - p = Parser(grammar, [converter]) # create instance - p.setup([start]) # prepare for parsing - : - if p.addtoken(...): # parse a token; may raise ParseError - break - root = p.rootnode # root of abstract syntax tree - - A Parser instance may be reused by calling setup() repeatedly. - - A Parser instance contains state pertaining to the current token - sequence, and should not be used concurrently by different threads - to parse separate token sequences. - - See driver.py for how to get input tokens by tokenizing a file or - string. - - Parsing is complete when addtoken() returns True; the root of the - abstract syntax tree can then be retrieved from the rootnode - instance variable. When a syntax error occurs, addtoken() raises - the ParseError exception. There is no error recovery; the parser - cannot be used after a syntax error was reported (but it can be - reinitialized by calling setup()). - - """ - - def __init__(self, grammar, convert=None): - """Constructor. - - The grammar argument is a grammar.Grammar instance; see the - grammar module for more information. - - The parser is not ready yet for parsing; you must call the - setup() method to get it started. - - The optional convert argument is a function mapping concrete - syntax tree nodes to abstract syntax tree nodes. If not - given, no conversion is done and the syntax tree produced is - the concrete syntax tree. If given, it must be a function of - two arguments, the first being the grammar (a grammar.Grammar - instance), and the second being the concrete syntax tree node - to be converted. The syntax tree is converted from the bottom - up. - - A concrete syntax tree node is a (type, value, context, nodes) - tuple, where type is the node type (a token or symbol number), - value is None for symbols and a string for tokens, context is - None or an opaque value used for error reporting (typically a - (lineno, offset) pair), and nodes is a list of children for - symbols, and None for tokens. - - An abstract syntax tree node may be anything; this is entirely - up to the converter function. - - """ - self.grammar = grammar - self.convert = convert or (lambda grammar, node: node) - - def setup(self, start=None): - """Prepare for parsing. - - This *must* be called before starting to parse. - - The optional argument is an alternative start symbol; it - defaults to the grammar's start symbol. - - You can use a Parser instance to parse any number of programs; - each time you call setup() the parser is reset to an initial - state determined by the (implicit or explicit) start symbol. - - """ - if start is None: - start = self.grammar.start - # Each stack entry is a tuple: (dfa, state, node). - # A node is a tuple: (type, value, context, children), - # where children is a list of nodes or None, and context may be None. 
- newnode = (start, None, None, []) - stackentry = (self.grammar.dfas[start], 0, newnode) - self.stack = [stackentry] - self.rootnode = None - self.used_names = set() # Aliased to self.rootnode.used_names in pop() - - def addtoken(self, type, value, context): - """Add a token; return True iff this is the end of the program.""" - # Map from token to label - ilabel = self.classify(type, value, context) - # Loop until the token is shifted; may raise exceptions - while True: - dfa, state, node = self.stack[-1] - states, first = dfa - arcs = states[state] - # Look for a state with this label - for i, newstate in arcs: - t, v = self.grammar.labels[i] - if ilabel == i: - # Look it up in the list of labels - assert t < 256 - # Shift a token; we're done with it - self.shift(type, value, newstate, context) - # Pop while we are in an accept-only state - state = newstate - while states[state] == [(0, state)]: - self.pop() - if not self.stack: - # Done parsing! - return True - dfa, state, node = self.stack[-1] - states, first = dfa - # Done with this token - return False - elif t >= 256: - # See if it's a symbol and if we're in its first set - itsdfa = self.grammar.dfas[t] - itsstates, itsfirst = itsdfa - if ilabel in itsfirst: - # Push a symbol - self.push(t, self.grammar.dfas[t], newstate, context) - break # To continue the outer while loop - else: - if (0, state) in arcs: - # An accepting state, pop it and try something else - self.pop() - if not self.stack: - # Done parsing, but another token is input - raise ParseError("too much input", - type, value, context) - else: - # No success finding a transition - raise ParseError("bad input", type, value, context) - - def classify(self, type, value, context): - """Turn a token into a label. (Internal)""" - if type == token.NAME: - # Keep a listing of all used names - self.used_names.add(value) - # Check for reserved words - ilabel = self.grammar.keywords.get(value) - if ilabel is not None: - return ilabel - ilabel = self.grammar.tokens.get(type) - if ilabel is None: - raise ParseError("bad token", type, value, context) - return ilabel - - def shift(self, type, value, newstate, context): - """Shift a token. (Internal)""" - dfa, state, node = self.stack[-1] - newnode = (type, value, context, None) - newnode = self.convert(self.grammar, newnode) - if newnode is not None: - node[-1].append(newnode) - self.stack[-1] = (dfa, newstate, node) - - def push(self, type, newdfa, newstate, context): - """Push a nonterminal. (Internal)""" - dfa, state, node = self.stack[-1] - newnode = (type, None, context, []) - self.stack[-1] = (dfa, newstate, node) - self.stack.append((newdfa, 0, newnode)) - - def pop(self): - """Pop a nonterminal. (Internal)""" - popdfa, popstate, popnode = self.stack.pop() - newnode = self.convert(self.grammar, popnode) - if newnode is not None: - if self.stack: - dfa, state, node = self.stack[-1] - node[-1].append(newnode) - else: - self.rootnode = newnode - self.rootnode.used_names = self.used_names diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py deleted file mode 100644 index 63084a4c..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Pgen imports -from . 
import grammar, token, tokenize - -class PgenGrammar(grammar.Grammar): - pass - -class ParserGenerator(object): - - def __init__(self, filename, stream=None): - close_stream = None - if stream is None: - stream = open(filename) - close_stream = stream.close - self.filename = filename - self.stream = stream - self.generator = tokenize.generate_tokens(stream.readline) - self.gettoken() # Initialize lookahead - self.dfas, self.startsymbol = self.parse() - if close_stream is not None: - close_stream() - self.first = {} # map from symbol name to set of tokens - self.addfirstsets() - - def make_grammar(self): - c = PgenGrammar() - names = self.dfas.keys() - names.sort() - names.remove(self.startsymbol) - names.insert(0, self.startsymbol) - for name in names: - i = 256 + len(c.symbol2number) - c.symbol2number[name] = i - c.number2symbol[i] = name - for name in names: - dfa = self.dfas[name] - states = [] - for state in dfa: - arcs = [] - for label, next in state.arcs.iteritems(): - arcs.append((self.make_label(c, label), dfa.index(next))) - if state.isfinal: - arcs.append((0, dfa.index(state))) - states.append(arcs) - c.states.append(states) - c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) - c.start = c.symbol2number[self.startsymbol] - return c - - def make_first(self, c, name): - rawfirst = self.first[name] - first = {} - for label in rawfirst: - ilabel = self.make_label(c, label) - ##assert ilabel not in first # XXX failed on <> ... != - first[ilabel] = 1 - return first - - def make_label(self, c, label): - # XXX Maybe this should be a method on a subclass of converter? - ilabel = len(c.labels) - if label[0].isalpha(): - # Either a symbol name or a named token - if label in c.symbol2number: - # A symbol name (a non-terminal) - if label in c.symbol2label: - return c.symbol2label[label] - else: - c.labels.append((c.symbol2number[label], None)) - c.symbol2label[label] = ilabel - return ilabel - else: - # A named token (NAME, NUMBER, STRING) - itoken = getattr(token, label, None) - assert isinstance(itoken, int), label - assert itoken in token.tok_name, label - if itoken in c.tokens: - return c.tokens[itoken] - else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel - else: - # Either a keyword or an operator - assert label[0] in ('"', "'"), label - value = eval(label) - if value[0].isalpha(): - # A keyword - if value in c.keywords: - return c.keywords[value] - else: - c.labels.append((token.NAME, value)) - c.keywords[value] = ilabel - return ilabel - else: - # An operator (any non-numeric token) - itoken = grammar.opmap[value] # Fails if unknown token - if itoken in c.tokens: - return c.tokens[itoken] - else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel - - def addfirstsets(self): - names = self.dfas.keys() - names.sort() - for name in names: - if name not in self.first: - self.calcfirst(name) - #print name, self.first[name].keys() - - def calcfirst(self, name): - dfa = self.dfas[name] - self.first[name] = None # dummy to detect left recursion - state = dfa[0] - totalset = {} - overlapcheck = {} - for label, next in state.arcs.iteritems(): - if label in self.dfas: - if label in self.first: - fset = self.first[label] - if fset is None: - raise ValueError("recursion for rule %r" % name) - else: - self.calcfirst(label) - fset = self.first[label] - totalset.update(fset) - overlapcheck[label] = fset - else: - totalset[label] = 1 - overlapcheck[label] = {label: 1} - inverse = {} - for label, itsfirst in 
overlapcheck.iteritems(): - for symbol in itsfirst: - if symbol in inverse: - raise ValueError("rule %s is ambiguous; %s is in the" - " first sets of %s as well as %s" % - (name, symbol, label, inverse[symbol])) - inverse[symbol] = label - self.first[name] = totalset - - def parse(self): - dfas = {} - startsymbol = None - # MSTART: (NEWLINE | RULE)* ENDMARKER - while self.type != token.ENDMARKER: - while self.type == token.NEWLINE: - self.gettoken() - # RULE: NAME ':' RHS NEWLINE - name = self.expect(token.NAME) - self.expect(token.OP, ":") - a, z = self.parse_rhs() - self.expect(token.NEWLINE) - #self.dump_nfa(name, a, z) - dfa = self.make_dfa(a, z) - #self.dump_dfa(name, dfa) - oldlen = len(dfa) - self.simplify_dfa(dfa) - newlen = len(dfa) - dfas[name] = dfa - #print name, oldlen, newlen - if startsymbol is None: - startsymbol = name - return dfas, startsymbol - - def make_dfa(self, start, finish): - # To turn an NFA into a DFA, we define the states of the DFA - # to correspond to *sets* of states of the NFA. Then do some - # state reduction. Let's represent sets as dicts with 1 for - # values. - assert isinstance(start, NFAState) - assert isinstance(finish, NFAState) - def closure(state): - base = {} - addclosure(state, base) - return base - def addclosure(state, base): - assert isinstance(state, NFAState) - if state in base: - return - base[state] = 1 - for label, next in state.arcs: - if label is None: - addclosure(next, base) - states = [DFAState(closure(start), finish)] - for state in states: # NB states grows while we're iterating - arcs = {} - for nfastate in state.nfaset: - for label, next in nfastate.arcs: - if label is not None: - addclosure(next, arcs.setdefault(label, {})) - for label, nfaset in arcs.iteritems(): - for st in states: - if st.nfaset == nfaset: - break - else: - st = DFAState(nfaset, finish) - states.append(st) - state.addarc(st, label) - return states # List of DFAState instances; first one is start - - def dump_nfa(self, name, start, finish): - print "Dump of NFA for", name - todo = [start] - for i, state in enumerate(todo): - print " State", i, state is finish and "(final)" or "" - for label, next in state.arcs: - if next in todo: - j = todo.index(next) - else: - j = len(todo) - todo.append(next) - if label is None: - print " -> %d" % j - else: - print " %s -> %d" % (label, j) - - def dump_dfa(self, name, dfa): - print "Dump of DFA for", name - for i, state in enumerate(dfa): - print " State", i, state.isfinal and "(final)" or "" - for label, next in state.arcs.iteritems(): - print " %s -> %d" % (label, dfa.index(next)) - - def simplify_dfa(self, dfa): - # This is not theoretically optimal, but works well enough. - # Algorithm: repeatedly look for two states that have the same - # set of arcs (same labels pointing to the same nodes) and - # unify them, until things stop changing. 
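# --- Editor's annotation (not part of the original pgen.py) ---
# "Same set of arcs" relies on DFAState.__eq__ further below: two states are
# merged when they agree on finality and their arcs map equal labels to
# identical target states. For example, if states 2 and 5 both carry
# {'NAME': s3}, state 5 is deleted and every arc pointing at it is rewired
# to state 2 via unifystate(); the scan restarts until no pair merges.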
- - # dfa is a list of DFAState instances - changes = True - while changes: - changes = False - for i, state_i in enumerate(dfa): - for j in range(i+1, len(dfa)): - state_j = dfa[j] - if state_i == state_j: - #print " unify", i, j - del dfa[j] - for state in dfa: - state.unifystate(state_j, state_i) - changes = True - break - - def parse_rhs(self): - # RHS: ALT ('|' ALT)* - a, z = self.parse_alt() - if self.value != "|": - return a, z - else: - aa = NFAState() - zz = NFAState() - aa.addarc(a) - z.addarc(zz) - while self.value == "|": - self.gettoken() - a, z = self.parse_alt() - aa.addarc(a) - z.addarc(zz) - return aa, zz - - def parse_alt(self): - # ALT: ITEM+ - a, b = self.parse_item() - while (self.value in ("(", "[") or - self.type in (token.NAME, token.STRING)): - c, d = self.parse_item() - b.addarc(c) - b = d - return a, b - - def parse_item(self): - # ITEM: '[' RHS ']' | ATOM ['+' | '*'] - if self.value == "[": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, "]") - a.addarc(z) - return a, z - else: - a, z = self.parse_atom() - value = self.value - if value not in ("+", "*"): - return a, z - self.gettoken() - z.addarc(a) - if value == "+": - return a, z - else: - return a, a - - def parse_atom(self): - # ATOM: '(' RHS ')' | NAME | STRING - if self.value == "(": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, ")") - return a, z - elif self.type in (token.NAME, token.STRING): - a = NFAState() - z = NFAState() - a.addarc(z, self.value) - self.gettoken() - return a, z - else: - self.raise_error("expected (...) or NAME or STRING, got %s/%s", - self.type, self.value) - - def expect(self, type, value=None): - if self.type != type or (value is not None and self.value != value): - self.raise_error("expected %s/%s, got %s/%s", - type, value, self.type, self.value) - value = self.value - self.gettoken() - return value - - def gettoken(self): - tup = self.generator.next() - while tup[0] in (tokenize.COMMENT, tokenize.NL): - tup = self.generator.next() - self.type, self.value, self.begin, self.end, self.line = tup - #print token.tok_name[self.type], repr(self.value) - - def raise_error(self, msg, *args): - if args: - try: - msg = msg % args - except: - msg = " ".join([msg] + map(str, args)) - raise SyntaxError(msg, (self.filename, self.end[0], - self.end[1], self.line)) - -class NFAState(object): - - def __init__(self): - self.arcs = [] # list of (label, NFAState) pairs - - def addarc(self, next, label=None): - assert label is None or isinstance(label, str) - assert isinstance(next, NFAState) - self.arcs.append((label, next)) - -class DFAState(object): - - def __init__(self, nfaset, final): - assert isinstance(nfaset, dict) - assert isinstance(iter(nfaset).next(), NFAState) - assert isinstance(final, NFAState) - self.nfaset = nfaset - self.isfinal = final in nfaset - self.arcs = {} # map from label to DFAState - - def addarc(self, next, label): - assert isinstance(label, str) - assert label not in self.arcs - assert isinstance(next, DFAState) - self.arcs[label] = next - - def unifystate(self, old, new): - for label, next in self.arcs.iteritems(): - if next is old: - self.arcs[label] = new - - def __eq__(self, other): - # Equality test -- ignore the nfaset instance variable - assert isinstance(other, DFAState) - if self.isfinal != other.isfinal: - return False - # Can't just return self.arcs == other.arcs, because that - # would invoke this method recursively, with cycles... 
- if len(self.arcs) != len(other.arcs): - return False - for label, next in self.arcs.iteritems(): - if next is not other.arcs.get(label): - return False - return True - - __hash__ = None # For Py3 compatibility. - -def generate_grammar(filename="Grammar.txt"): - p = ParserGenerator(filename) - return p.make_grammar() diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/token.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/token.py deleted file mode 100644 index 61468b31..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/token.py +++ /dev/null @@ -1,82 +0,0 @@ -#! /usr/bin/env python - -"""Token constants (from "token.h").""" - -# Taken from Python (r53757) and modified to include some tokens -# originally monkeypatched in by pgen2.tokenize - -#--start constants-- -ENDMARKER = 0 -NAME = 1 -NUMBER = 2 -STRING = 3 -NEWLINE = 4 -INDENT = 5 -DEDENT = 6 -LPAR = 7 -RPAR = 8 -LSQB = 9 -RSQB = 10 -COLON = 11 -COMMA = 12 -SEMI = 13 -PLUS = 14 -MINUS = 15 -STAR = 16 -SLASH = 17 -VBAR = 18 -AMPER = 19 -LESS = 20 -GREATER = 21 -EQUAL = 22 -DOT = 23 -PERCENT = 24 -BACKQUOTE = 25 -LBRACE = 26 -RBRACE = 27 -EQEQUAL = 28 -NOTEQUAL = 29 -LESSEQUAL = 30 -GREATEREQUAL = 31 -TILDE = 32 -CIRCUMFLEX = 33 -LEFTSHIFT = 34 -RIGHTSHIFT = 35 -DOUBLESTAR = 36 -PLUSEQUAL = 37 -MINEQUAL = 38 -STAREQUAL = 39 -SLASHEQUAL = 40 -PERCENTEQUAL = 41 -AMPEREQUAL = 42 -VBAREQUAL = 43 -CIRCUMFLEXEQUAL = 44 -LEFTSHIFTEQUAL = 45 -RIGHTSHIFTEQUAL = 46 -DOUBLESTAREQUAL = 47 -DOUBLESLASH = 48 -DOUBLESLASHEQUAL = 49 -AT = 50 -OP = 51 -COMMENT = 52 -NL = 53 -RARROW = 54 -ERRORTOKEN = 55 -N_TOKENS = 56 -NT_OFFSET = 256 -#--end constants-- - -tok_name = {} -for _name, _value in globals().items(): - if type(_value) is type(0): - tok_name[_value] = _name - - -def ISTERMINAL(x): - return x < NT_OFFSET - -def ISNONTERMINAL(x): - return x >= NT_OFFSET - -def ISEOF(x): - return x == ENDMARKER diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py deleted file mode 100644 index f6e0284c..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py +++ /dev/null @@ -1,499 +0,0 @@ -# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. -# All rights reserved. - -"""Tokenization help for Python programs. - -generate_tokens(readline) is a generator that breaks a stream of -text into Python tokens. It accepts a readline-like method which is called -repeatedly to get the next line of input (or "" for EOF). 
It generates -5-tuples with these members: - - the token type (see token.py) - the token (a string) - the starting (row, column) indices of the token (a 2-tuple of ints) - the ending (row, column) indices of the token (a 2-tuple of ints) - the original line (string) - -It is designed to match the working of the Python tokenizer exactly, except -that it produces COMMENT tokens for comments and gives type OP for all -operators - -Older entry points - tokenize_loop(readline, tokeneater) - tokenize(readline, tokeneater=printtoken) -are the same, except instead of generating tokens, tokeneater is a callback -function to which the 5 fields described above are passed as 5 arguments, -each time a new token is found.""" - -__author__ = 'Ka-Ping Yee ' -__credits__ = \ - 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro' - -import string, re -from codecs import BOM_UTF8, lookup -from lib2to3.pgen2.token import * - -from . import token -__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize", - "generate_tokens", "untokenize"] -del token - -try: - bytes -except NameError: - # Support bytes type in Python <= 2.5, so 2to3 turns itself into - # valid Python 3 code. - bytes = str - -def group(*choices): return '(' + '|'.join(choices) + ')' -def any(*choices): return group(*choices) + '*' -def maybe(*choices): return group(*choices) + '?' - -Whitespace = r'[ \f\t]*' -Comment = r'#[^\r\n]*' -Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) -Name = r'[a-zA-Z_]\w*' - -Binnumber = r'0[bB][01]*' -Hexnumber = r'0[xX][\da-fA-F]*[lL]?' -Octnumber = r'0[oO]?[0-7]*[lL]?' -Decnumber = r'[1-9]\d*[lL]?' -Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) -Exponent = r'[eE][-+]?\d+' -Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) -Expfloat = r'\d+' + Exponent -Floatnumber = group(Pointfloat, Expfloat) -Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') -Number = group(Imagnumber, Floatnumber, Intnumber) - -# Tail end of ' string. -Single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -Double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""') -# Single-line ' or " string. -String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", - r"//=?", r"->", - r"[+\-*/%&|^=<>]=?", - r"~") - -Bracket = '[][(){}]' -Special = group(r'\r?\n', r'[:;.,`@]') -Funny = group(Operator, Bracket, Special) - -PlainToken = group(Number, Funny, String, Name) -Token = Ignore + PlainToken - -# First (or only) line of ' or " string. 
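# --- Editor's annotation (not part of the original tokenize.py) ---
# The group()/any()/maybe() helpers above only concatenate strings, e.g.
#   group('a', 'b')  -> '(a|b)'
#   any(r'\d')       -> '(\d)*'
#   maybe('x')       -> '(x)?'
# so Number, String, and ContStr (defined next) are plain strings that are
# later fed to re.compile().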
-ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + - group("'", r'\\\r?\n'), - r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + - group('"', r'\\\r?\n')) -PseudoExtras = group(r'\\\r?\n', Comment, Triple) -PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) - -tokenprog, pseudoprog, single3prog, double3prog = map( - re.compile, (Token, PseudoToken, Single3, Double3)) -endprogs = {"'": re.compile(Single), '"': re.compile(Double), - "'''": single3prog, '"""': double3prog, - "r'''": single3prog, 'r"""': double3prog, - "u'''": single3prog, 'u"""': double3prog, - "b'''": single3prog, 'b"""': double3prog, - "ur'''": single3prog, 'ur"""': double3prog, - "br'''": single3prog, 'br"""': double3prog, - "R'''": single3prog, 'R"""': double3prog, - "U'''": single3prog, 'U"""': double3prog, - "B'''": single3prog, 'B"""': double3prog, - "uR'''": single3prog, 'uR"""': double3prog, - "Ur'''": single3prog, 'Ur"""': double3prog, - "UR'''": single3prog, 'UR"""': double3prog, - "bR'''": single3prog, 'bR"""': double3prog, - "Br'''": single3prog, 'Br"""': double3prog, - "BR'''": single3prog, 'BR"""': double3prog, - 'r': None, 'R': None, - 'u': None, 'U': None, - 'b': None, 'B': None} - -triple_quoted = {} -for t in ("'''", '"""', - "r'''", 'r"""', "R'''", 'R"""', - "u'''", 'u"""', "U'''", 'U"""', - "b'''", 'b"""', "B'''", 'B"""', - "ur'''", 'ur"""', "Ur'''", 'Ur"""', - "uR'''", 'uR"""', "UR'''", 'UR"""', - "br'''", 'br"""', "Br'''", 'Br"""', - "bR'''", 'bR"""', "BR'''", 'BR"""',): - triple_quoted[t] = t -single_quoted = {} -for t in ("'", '"', - "r'", 'r"', "R'", 'R"', - "u'", 'u"', "U'", 'U"', - "b'", 'b"', "B'", 'B"', - "ur'", 'ur"', "Ur'", 'Ur"', - "uR'", 'uR"', "UR'", 'UR"', - "br'", 'br"', "Br'", 'Br"', - "bR'", 'bR"', "BR'", 'BR"', ): - single_quoted[t] = t - -tabsize = 8 - -class TokenError(Exception): pass - -class StopTokenizing(Exception): pass - -def printtoken(type, token, start, end, line): # for testing - (srow, scol) = start - (erow, ecol) = end - print "%d,%d-%d,%d:\t%s\t%s" % \ - (srow, scol, erow, ecol, tok_name[type], repr(token)) - -def tokenize(readline, tokeneater=printtoken): - """ - The tokenize() function accepts two parameters: one representing the - input stream, and one providing an output mechanism for tokenize(). - - The first parameter, readline, must be a callable object which provides - the same interface as the readline() method of built-in file objects. - Each call to the function should return one line of input as a string. - - The second parameter, tokeneater, must also be a callable object. It is - called once for each token, with five arguments, corresponding to the - tuples generated by generate_tokens(). 
- """ - try: - tokenize_loop(readline, tokeneater) - except StopTokenizing: - pass - -# backwards compatible interface -def tokenize_loop(readline, tokeneater): - for token_info in generate_tokens(readline): - tokeneater(*token_info) - -class Untokenizer: - - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - - def add_whitespace(self, start): - row, col = start - assert row <= self.prev_row - col_offset = col - self.prev_col - if col_offset: - self.tokens.append(" " * col_offset) - - def untokenize(self, iterable): - for t in iterable: - if len(t) == 2: - self.compat(t, iterable) - break - tok_type, token, start, end, line = t - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - return "".join(self.tokens) - - def compat(self, token, iterable): - startline = False - indents = [] - toks_append = self.tokens.append - toknum, tokval = token - if toknum in (NAME, NUMBER): - tokval += ' ' - if toknum in (NEWLINE, NL): - startline = True - for tok in iterable: - toknum, tokval = tok[:2] - - if toknum in (NAME, NUMBER): - tokval += ' ' - - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - toks_append(tokval) - -cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)') - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argment, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read - in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, but - disagree, a SyntaxError will be raised. If the encoding cookie is an invalid - charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. 
- """ - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return bytes() - - def find_cookie(line): - try: - line_string = line.decode('ascii') - except UnicodeDecodeError: - return None - match = cookie_re.match(line_string) - if not match: - return None - encoding = _get_normal_name(match.group(1)) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - raise SyntaxError("unknown encoding: " + encoding) - - if bom_found: - if codec.name != 'utf-8': - # This behaviour mimics the Python interpreter - raise SyntaxError('encoding problem: utf-8') - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - -def untokenize(iterable): - """Transform tokens back into Python source code. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - Round-trip invariant for full input: - Untokenized source will match input source exactly - - Round-trip invariant for limited intput: - # Output text will tokenize the back to the input - t1 = [tok[:2] for tok in generate_tokens(f.readline)] - newcode = untokenize(t1) - readline = iter(newcode.splitlines(1)).next - t2 = [tok[:2] for tokin generate_tokens(readline)] - assert t1 == t2 - """ - ut = Untokenizer() - return ut.untokenize(iterable) - -def generate_tokens(readline): - """ - The generate_tokens() generator requires one argment, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as a string. Alternately, readline - can be a callable function terminating with StopIteration: - readline = open(myfile).next # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - logical line; continuation lines are included. 
- """ - lnum = parenlev = continued = 0 - namechars, numchars = string.ascii_letters + '_', '0123456789' - contstr, needcont = '', 0 - contline = None - indents = [0] - - while 1: # loop over lines in stream - try: - line = readline() - except StopIteration: - line = '' - lnum = lnum + 1 - pos, max = 0, len(line) - - if contstr: # continued string - if not line: - raise TokenError, ("EOF in multi-line string", strstart) - endmatch = endprog.match(line) - if endmatch: - pos = end = endmatch.end(0) - yield (STRING, contstr + line[:end], - strstart, (lnum, end), contline + line) - contstr, needcont = '', 0 - contline = None - elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': - yield (ERRORTOKEN, contstr + line, - strstart, (lnum, len(line)), contline) - contstr = '' - contline = None - continue - else: - contstr = contstr + line - contline = contline + line - continue - - elif parenlev == 0 and not continued: # new statement - if not line: break - column = 0 - while pos < max: # measure leading whitespace - if line[pos] == ' ': column = column + 1 - elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize - elif line[pos] == '\f': column = 0 - else: break - pos = pos + 1 - if pos == max: break - - if line[pos] in '#\r\n': # skip comments or blank lines - if line[pos] == '#': - comment_token = line[pos:].rstrip('\r\n') - nl_pos = pos + len(comment_token) - yield (COMMENT, comment_token, - (lnum, pos), (lnum, pos + len(comment_token)), line) - yield (NL, line[nl_pos:], - (lnum, nl_pos), (lnum, len(line)), line) - else: - yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], - (lnum, pos), (lnum, len(line)), line) - continue - - if column > indents[-1]: # count indents or dedents - indents.append(column) - yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) - while column < indents[-1]: - if column not in indents: - raise IndentationError( - "unindent does not match any outer indentation level", - ("", lnum, pos, line)) - indents = indents[:-1] - yield (DEDENT, '', (lnum, pos), (lnum, pos), line) - - else: # continued statement - if not line: - raise TokenError, ("EOF in multi-line statement", (lnum, 0)) - continued = 0 - - while pos < max: - pseudomatch = pseudoprog.match(line, pos) - if pseudomatch: # scan for tokens - start, end = pseudomatch.span(1) - spos, epos, pos = (lnum, start), (lnum, end), end - token, initial = line[start:end], line[start] - - if initial in numchars or \ - (initial == '.' 
and token != '.'): # ordinary number - yield (NUMBER, token, spos, epos, line) - elif initial in '\r\n': - newline = NEWLINE - if parenlev > 0: - newline = NL - yield (newline, token, spos, epos, line) - elif initial == '#': - assert not token.endswith("\n") - yield (COMMENT, token, spos, epos, line) - elif token in triple_quoted: - endprog = endprogs[token] - endmatch = endprog.match(line, pos) - if endmatch: # all on one line - pos = endmatch.end(0) - token = line[start:pos] - yield (STRING, token, spos, (lnum, pos), line) - else: - strstart = (lnum, start) # multiple lines - contstr = line[start:] - contline = line - break - elif initial in single_quoted or \ - token[:2] in single_quoted or \ - token[:3] in single_quoted: - if token[-1] == '\n': # continued string - strstart = (lnum, start) - endprog = (endprogs[initial] or endprogs[token[1]] or - endprogs[token[2]]) - contstr, needcont = line[start:], 1 - contline = line - break - else: # ordinary string - yield (STRING, token, spos, epos, line) - elif initial in namechars: # ordinary name - yield (NAME, token, spos, epos, line) - elif initial == '\\': # continued stmt - # This yield is new; needed for better idempotency: - yield (NL, token, spos, (lnum, pos), line) - continued = 1 - else: - if initial in '([{': parenlev = parenlev + 1 - elif initial in ')]}': parenlev = parenlev - 1 - yield (OP, token, spos, epos, line) - else: - yield (ERRORTOKEN, line[pos], - (lnum, pos), (lnum, pos+1), line) - pos = pos + 1 - - for indent in indents[1:]: # pop remaining indent levels - yield (DEDENT, '', (lnum, 0), (lnum, 0), '') - yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') - -if __name__ == '__main__': # testing - import sys - if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline) - else: tokenize(sys.stdin.readline) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pygram.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pygram.py deleted file mode 100644 index 621ff24c..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pygram.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Export the Python grammar and symbols.""" - -# Python imports -import os - -# Local imports -from .pgen2 import token -from .pgen2 import driver -from . import pytree - -# The grammar file -_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") -_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), - "PatternGrammar.txt") - - -class Symbols(object): - - def __init__(self, grammar): - """Initializer. - - Creates an attribute for each grammar symbol (nonterminal), - whose value is the symbol's type (an int >= 256). - """ - for name, symbol in grammar.symbol2number.iteritems(): - setattr(self, name, symbol) - - -python_grammar = driver.load_grammar(_GRAMMAR_FILE) - -python_symbols = Symbols(python_grammar) - -python_grammar_no_print_statement = python_grammar.copy() -del python_grammar_no_print_statement.keywords["print"] - -pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE) -pattern_symbols = Symbols(pattern_grammar) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pytree.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pytree.py deleted file mode 100644 index 179caca5..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/pytree.py +++ /dev/null @@ -1,887 +0,0 @@ -# Copyright 2006 Google, Inc. 
All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -""" -Python parse tree definitions. - -This is a very concrete parse tree; we need to keep every token and -even the comments and whitespace between tokens. - -There's also a pattern matching implementation here. -""" - -__author__ = "Guido van Rossum " - -import sys -import warnings -from StringIO import StringIO - -HUGE = 0x7FFFFFFF # maximum repeat count, default max - -_type_reprs = {} -def type_repr(type_num): - global _type_reprs - if not _type_reprs: - from .pygram import python_symbols - # printing tokens is possible but not as useful - # from .pgen2 import token // token.__dict__.items(): - for name, val in python_symbols.__dict__.items(): - if type(val) == int: _type_reprs[val] = name - return _type_reprs.setdefault(type_num, type_num) - -class Base(object): - - """ - Abstract base class for Node and Leaf. - - This provides some default functionality and boilerplate using the - template pattern. - - A node may be a subnode of at most one parent. - """ - - # Default values for instance variables - type = None # int: token number (< 256) or symbol number (>= 256) - parent = None # Parent node pointer, or None - children = () # Tuple of subnodes - was_changed = False - was_checked = False - - def __new__(cls, *args, **kwds): - """Constructor that prevents Base from being instantiated.""" - assert cls is not Base, "Cannot instantiate Base" - return object.__new__(cls) - - def __eq__(self, other): - """ - Compare two nodes for equality. - - This calls the method _eq(). - """ - if self.__class__ is not other.__class__: - return NotImplemented - return self._eq(other) - - __hash__ = None # For Py3 compatibility. - - def __ne__(self, other): - """ - Compare two nodes for inequality. - - This calls the method _eq(). - """ - if self.__class__ is not other.__class__: - return NotImplemented - return not self._eq(other) - - def _eq(self, other): - """ - Compare two nodes for equality. - - This is called by __eq__ and __ne__. It is only called if the two nodes - have the same type. This must be implemented by the concrete subclass. - Nodes should be considered equal if they have the same structure, - ignoring the prefix string and other context information. - """ - raise NotImplementedError - - def clone(self): - """ - Return a cloned (deep) copy of self. - - This must be implemented by the concrete subclass. - """ - raise NotImplementedError - - def post_order(self): - """ - Return a post-order iterator for the tree. - - This must be implemented by the concrete subclass. - """ - raise NotImplementedError - - def pre_order(self): - """ - Return a pre-order iterator for the tree. - - This must be implemented by the concrete subclass. - """ - raise NotImplementedError - - def set_prefix(self, prefix): - """ - Set the prefix for the node (see Leaf class). - - DEPRECATED; use the prefix property directly. - """ - warnings.warn("set_prefix() is deprecated; use the prefix property", - DeprecationWarning, stacklevel=2) - self.prefix = prefix - - def get_prefix(self): - """ - Return the prefix for the node (see Leaf class). - - DEPRECATED; use the prefix property directly. 
- """ - warnings.warn("get_prefix() is deprecated; use the prefix property", - DeprecationWarning, stacklevel=2) - return self.prefix - - def replace(self, new): - """Replace this node with a new one in the parent.""" - assert self.parent is not None, str(self) - assert new is not None - if not isinstance(new, list): - new = [new] - l_children = [] - found = False - for ch in self.parent.children: - if ch is self: - assert not found, (self.parent.children, self, new) - if new is not None: - l_children.extend(new) - found = True - else: - l_children.append(ch) - assert found, (self.children, self, new) - self.parent.changed() - self.parent.children = l_children - for x in new: - x.parent = self.parent - self.parent = None - - def get_lineno(self): - """Return the line number which generated the invocant node.""" - node = self - while not isinstance(node, Leaf): - if not node.children: - return - node = node.children[0] - return node.lineno - - def changed(self): - if self.parent: - self.parent.changed() - self.was_changed = True - - def remove(self): - """ - Remove the node from the tree. Returns the position of the node in its - parent's children before it was removed. - """ - if self.parent: - for i, node in enumerate(self.parent.children): - if node is self: - self.parent.changed() - del self.parent.children[i] - self.parent = None - return i - - @property - def next_sibling(self): - """ - The node immediately following the invocant in their parent's children - list. If the invocant does not have a next sibling, it is None - """ - if self.parent is None: - return None - - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - try: - return self.parent.children[i+1] - except IndexError: - return None - - @property - def prev_sibling(self): - """ - The node immediately preceding the invocant in their parent's children - list. If the invocant does not have a previous sibling, it is None. - """ - if self.parent is None: - return None - - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - if i == 0: - return None - return self.parent.children[i-1] - - def leaves(self): - for child in self.children: - for x in child.leaves(): - yield x - - def depth(self): - if self.parent is None: - return 0 - return 1 + self.parent.depth() - - def get_suffix(self): - """ - Return the string immediately following the invocant node. This is - effectively equivalent to node.next_sibling.prefix - """ - next_sib = self.next_sibling - if next_sib is None: - return u"" - return next_sib.prefix - - if sys.version_info < (3, 0): - def __str__(self): - return unicode(self).encode("ascii") - -class Node(Base): - - """Concrete implementation for interior nodes.""" - - def __init__(self,type, children, - context=None, - prefix=None, - fixers_applied=None): - """ - Initializer. - - Takes a type constant (a symbol number >= 256), a sequence of - child nodes, and an optional context keyword argument. - - As a side effect, the parent pointers of the children are updated. 
- """ - assert type >= 256, type - self.type = type - self.children = list(children) - for ch in self.children: - assert ch.parent is None, repr(ch) - ch.parent = self - if prefix is not None: - self.prefix = prefix - if fixers_applied: - self.fixers_applied = fixers_applied[:] - else: - self.fixers_applied = None - - def __repr__(self): - """Return a canonical string representation.""" - return "%s(%s, %r)" % (self.__class__.__name__, - type_repr(self.type), - self.children) - - def __unicode__(self): - """ - Return a pretty string representation. - - This reproduces the input source exactly. - """ - return u"".join(map(unicode, self.children)) - - if sys.version_info > (3, 0): - __str__ = __unicode__ - - def _eq(self, other): - """Compare two nodes for equality.""" - return (self.type, self.children) == (other.type, other.children) - - def clone(self): - """Return a cloned (deep) copy of self.""" - return Node(self.type, [ch.clone() for ch in self.children], - fixers_applied=self.fixers_applied) - - def post_order(self): - """Return a post-order iterator for the tree.""" - for child in self.children: - for node in child.post_order(): - yield node - yield self - - def pre_order(self): - """Return a pre-order iterator for the tree.""" - yield self - for child in self.children: - for node in child.pre_order(): - yield node - - def _prefix_getter(self): - """ - The whitespace and comments preceding this node in the input. - """ - if not self.children: - return "" - return self.children[0].prefix - - def _prefix_setter(self, prefix): - if self.children: - self.children[0].prefix = prefix - - prefix = property(_prefix_getter, _prefix_setter) - - def set_child(self, i, child): - """ - Equivalent to 'node.children[i] = child'. This method also sets the - child's parent attribute appropriately. - """ - child.parent = self - self.children[i].parent = None - self.children[i] = child - self.changed() - - def insert_child(self, i, child): - """ - Equivalent to 'node.children.insert(i, child)'. This method also sets - the child's parent attribute appropriately. - """ - child.parent = self - self.children.insert(i, child) - self.changed() - - def append_child(self, child): - """ - Equivalent to 'node.children.append(child)'. This method also sets the - child's parent attribute appropriately. - """ - child.parent = self - self.children.append(child) - self.changed() - - -class Leaf(Base): - - """Concrete implementation for leaf nodes.""" - - # Default values for instance variables - _prefix = "" # Whitespace and comments preceding this token in the input - lineno = 0 # Line where this token starts in the input - column = 0 # Column where this token tarts in the input - - def __init__(self, type, value, - context=None, - prefix=None, - fixers_applied=[]): - """ - Initializer. - - Takes a type constant (a token number < 256), a string value, and an - optional context keyword argument. - """ - assert 0 <= type < 256, type - if context is not None: - self._prefix, (self.lineno, self.column) = context - self.type = type - self.value = value - if prefix is not None: - self._prefix = prefix - self.fixers_applied = fixers_applied[:] - - def __repr__(self): - """Return a canonical string representation.""" - return "%s(%r, %r)" % (self.__class__.__name__, - self.type, - self.value) - - def __unicode__(self): - """ - Return a pretty string representation. - - This reproduces the input source exactly. 
- """ - return self.prefix + unicode(self.value) - - if sys.version_info > (3, 0): - __str__ = __unicode__ - - def _eq(self, other): - """Compare two nodes for equality.""" - return (self.type, self.value) == (other.type, other.value) - - def clone(self): - """Return a cloned (deep) copy of self.""" - return Leaf(self.type, self.value, - (self.prefix, (self.lineno, self.column)), - fixers_applied=self.fixers_applied) - - def leaves(self): - yield self - - def post_order(self): - """Return a post-order iterator for the tree.""" - yield self - - def pre_order(self): - """Return a pre-order iterator for the tree.""" - yield self - - def _prefix_getter(self): - """ - The whitespace and comments preceding this token in the input. - """ - return self._prefix - - def _prefix_setter(self, prefix): - self.changed() - self._prefix = prefix - - prefix = property(_prefix_getter, _prefix_setter) - -def convert(gr, raw_node): - """ - Convert raw node information to a Node or Leaf instance. - - This is passed to the parser driver which calls it whenever a reduction of a - grammar rule produces a new complete node, so that the tree is build - strictly bottom-up. - """ - type, value, context, children = raw_node - if children or type in gr.number2symbol: - # If there's exactly one child, return that child instead of - # creating a new node. - if len(children) == 1: - return children[0] - return Node(type, children, context=context) - else: - return Leaf(type, value, context=context) - - -class BasePattern(object): - - """ - A pattern is a tree matching pattern. - - It looks for a specific node type (token or symbol), and - optionally for a specific content. - - This is an abstract base class. There are three concrete - subclasses: - - - LeafPattern matches a single leaf node; - - NodePattern matches a single node (usually non-leaf); - - WildcardPattern matches a sequence of nodes of variable length. - """ - - # Defaults for instance variables - type = None # Node type (token if < 256, symbol if >= 256) - content = None # Optional content matching pattern - name = None # Optional name used to store match in results dict - - def __new__(cls, *args, **kwds): - """Constructor that prevents BasePattern from being instantiated.""" - assert cls is not BasePattern, "Cannot instantiate BasePattern" - return object.__new__(cls) - - def __repr__(self): - args = [type_repr(self.type), self.content, self.name] - while args and args[-1] is None: - del args[-1] - return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) - - def optimize(self): - """ - A subclass can define this as a hook for optimizations. - - Returns either self or another node with the same effect. - """ - return self - - def match(self, node, results=None): - """ - Does this pattern exactly match a node? - - Returns True if it matches, False if not. - - If results is not None, it must be a dict which will be - updated with the nodes matching named subpatterns. - - Default implementation for non-wildcard patterns. - """ - if self.type is not None and node.type != self.type: - return False - if self.content is not None: - r = None - if results is not None: - r = {} - if not self._submatch(node, r): - return False - if r: - results.update(r) - if results is not None and self.name: - results[self.name] = node - return True - - def match_seq(self, nodes, results=None): - """ - Does this pattern exactly match a sequence of nodes? - - Default implementation for non-wildcard patterns. 
- """ - if len(nodes) != 1: - return False - return self.match(nodes[0], results) - - def generate_matches(self, nodes): - """ - Generator yielding all matches for this pattern. - - Default implementation for non-wildcard patterns. - """ - r = {} - if nodes and self.match(nodes[0], r): - yield 1, r - - -class LeafPattern(BasePattern): - - def __init__(self, type=None, content=None, name=None): - """ - Initializer. Takes optional type, content, and name. - - The type, if given must be a token type (< 256). If not given, - this matches any *leaf* node; the content may still be required. - - The content, if given, must be a string. - - If a name is given, the matching node is stored in the results - dict under that key. - """ - if type is not None: - assert 0 <= type < 256, type - if content is not None: - assert isinstance(content, basestring), repr(content) - self.type = type - self.content = content - self.name = name - - def match(self, node, results=None): - """Override match() to insist on a leaf node.""" - if not isinstance(node, Leaf): - return False - return BasePattern.match(self, node, results) - - def _submatch(self, node, results=None): - """ - Match the pattern's content to the node's children. - - This assumes the node type matches and self.content is not None. - - Returns True if it matches, False if not. - - If results is not None, it must be a dict which will be - updated with the nodes matching named subpatterns. - - When returning False, the results dict may still be updated. - """ - return self.content == node.value - - -class NodePattern(BasePattern): - - wildcards = False - - def __init__(self, type=None, content=None, name=None): - """ - Initializer. Takes optional type, content, and name. - - The type, if given, must be a symbol type (>= 256). If the - type is None this matches *any* single node (leaf or not), - except if content is not None, in which it only matches - non-leaf nodes that also match the content pattern. - - The content, if not None, must be a sequence of Patterns that - must match the node's children exactly. If the content is - given, the type must not be None. - - If a name is given, the matching node is stored in the results - dict under that key. - """ - if type is not None: - assert type >= 256, type - if content is not None: - assert not isinstance(content, basestring), repr(content) - content = list(content) - for i, item in enumerate(content): - assert isinstance(item, BasePattern), (i, item) - if isinstance(item, WildcardPattern): - self.wildcards = True - self.type = type - self.content = content - self.name = name - - def _submatch(self, node, results=None): - """ - Match the pattern's content to the node's children. - - This assumes the node type matches and self.content is not None. - - Returns True if it matches, False if not. - - If results is not None, it must be a dict which will be - updated with the nodes matching named subpatterns. - - When returning False, the results dict may still be updated. - """ - if self.wildcards: - for c, r in generate_matches(self.content, node.children): - if c == len(node.children): - if results is not None: - results.update(r) - return True - return False - if len(self.content) != len(node.children): - return False - for subpattern, child in zip(self.content, node.children): - if not subpattern.match(child, results): - return False - return True - - -class WildcardPattern(BasePattern): - - """ - A wildcard pattern can match zero or more nodes. 
- - This has all the flexibility needed to implement patterns like: - - .* .+ .? .{m,n} - (a b c | d e | f) - (...)* (...)+ (...)? (...){m,n} - - except it always uses non-greedy matching. - """ - - def __init__(self, content=None, min=0, max=HUGE, name=None): - """ - Initializer. - - Args: - content: optional sequence of subsequences of patterns; - if absent, matches one node; - if present, each subsequence is an alternative [*] - min: optional minimum number of times to match, default 0 - max: optional maximum number of times to match, default HUGE - name: optional name assigned to this match - - [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is - equivalent to (a b c | d e | f g h); if content is None, - this is equivalent to '.' in regular expression terms. - The min and max parameters work as follows: - min=0, max=maxint: .* - min=1, max=maxint: .+ - min=0, max=1: .? - min=1, max=1: . - If content is not None, replace the dot with the parenthesized - list of alternatives, e.g. (a b c | d e | f g h)* - """ - assert 0 <= min <= max <= HUGE, (min, max) - if content is not None: - content = tuple(map(tuple, content)) # Protect against alterations - # Check sanity of alternatives - assert len(content), repr(content) # Can't have zero alternatives - for alt in content: - assert len(alt), repr(alt) # Can't have empty alternatives - self.content = content - self.min = min - self.max = max - self.name = name - - def optimize(self): - """Optimize certain stacked wildcard patterns.""" - subpattern = None - if (self.content is not None and - len(self.content) == 1 and len(self.content[0]) == 1): - subpattern = self.content[0][0] - if self.min == 1 and self.max == 1: - if self.content is None: - return NodePattern(name=self.name) - if subpattern is not None and self.name == subpattern.name: - return subpattern.optimize() - if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and - subpattern.min <= 1 and self.name == subpattern.name): - return WildcardPattern(subpattern.content, - self.min*subpattern.min, - self.max*subpattern.max, - subpattern.name) - return self - - def match(self, node, results=None): - """Does this pattern exactly match a node?""" - return self.match_seq([node], results) - - def match_seq(self, nodes, results=None): - """Does this pattern exactly match a sequence of nodes?""" - for c, r in self.generate_matches(nodes): - if c == len(nodes): - if results is not None: - results.update(r) - if self.name: - results[self.name] = list(nodes) - return True - return False - - def generate_matches(self, nodes): - """ - Generator yielding matches for a sequence of nodes. - - Args: - nodes: sequence of nodes - - Yields: - (count, results) tuples where: - count: the match comprises nodes[:count]; - results: dict containing named submatches. - """ - if self.content is None: - # Shortcut for special case (see __init__.__doc__) - for count in xrange(self.min, 1 + min(len(nodes), self.max)): - r = {} - if self.name: - r[self.name] = nodes[:count] - yield count, r - elif self.name == "bare_name": - yield self._bare_name_matches(nodes) - else: - # The reason for this is that hitting the recursion limit usually - # results in some ugly messages about how RuntimeErrors are being - # ignored. We don't do this on non-CPython implementation because - # they don't have this problem.
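WildcardPattern is the tree analogue of a regex quantifier, and matching is non-greedy, so shorter matches come out first. A sketch (stdlib lib2to3; the result key "names" is illustrative):

    from lib2to3.pytree import Leaf, LeafPattern, WildcardPattern
    from lib2to3.pgen2 import token

    # Roughly NAME{0,3} in regex terms: one alternative, repeated 0-3 times.
    wild = WildcardPattern([[LeafPattern(token.NAME)]], min=0, max=3, name="names")
    nodes = [Leaf(token.NAME, "a"), Leaf(token.NAME, "b")]
    for count, r in wild.generate_matches(nodes):
        print(count, [str(n) for n in r["names"]])
    # -> 0 [], then 1 ['a'], then 2 ['a', 'b']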
- if hasattr(sys, "getrefcount"): - save_stderr = sys.stderr - sys.stderr = StringIO() - try: - for count, r in self._recursive_matches(nodes, 0): - if self.name: - r[self.name] = nodes[:count] - yield count, r - except RuntimeError: - # We fall back to the iterative pattern matching scheme if the recursive - # scheme hits the recursion limit. - for count, r in self._iterative_matches(nodes): - if self.name: - r[self.name] = nodes[:count] - yield count, r - finally: - if hasattr(sys, "getrefcount"): - sys.stderr = save_stderr - - def _iterative_matches(self, nodes): - """Helper to iteratively yield the matches.""" - nodelen = len(nodes) - if 0 >= self.min: - yield 0, {} - - results = [] - # generate matches that use just one alt from self.content - for alt in self.content: - for c, r in generate_matches(alt, nodes): - yield c, r - results.append((c, r)) - - # for each match, iterate down the nodes - while results: - new_results = [] - for c0, r0 in results: - # stop if the entire set of nodes has been matched - if c0 < nodelen and c0 <= self.max: - for alt in self.content: - for c1, r1 in generate_matches(alt, nodes[c0:]): - if c1 > 0: - r = {} - r.update(r0) - r.update(r1) - yield c0 + c1, r - new_results.append((c0 + c1, r)) - results = new_results - - def _bare_name_matches(self, nodes): - """Special optimized matcher for bare_name.""" - count = 0 - r = {} - done = False - max = len(nodes) - while not done and count < max: - done = True - for leaf in self.content: - if leaf[0].match(nodes[count], r): - count += 1 - done = False - break - r[self.name] = nodes[:count] - return count, r - - def _recursive_matches(self, nodes, count): - """Helper to recursively yield the matches.""" - assert self.content is not None - if count >= self.min: - yield 0, {} - if count < self.max: - for alt in self.content: - for c0, r0 in generate_matches(alt, nodes): - for c1, r1 in self._recursive_matches(nodes[c0:], count+1): - r = {} - r.update(r0) - r.update(r1) - yield c0 + c1, r - - -class NegatedPattern(BasePattern): - - def __init__(self, content=None): - """ - Initializer. - - The argument is either a pattern or None. If it is None, this - only matches an empty sequence (effectively '$' in regex - lingo). If it is not None, this matches whenever the argument - pattern doesn't have any matches. - """ - if content is not None: - assert isinstance(content, BasePattern), repr(content) - self.content = content - - def match(self, node): - # We never match a node in its entirety - return False - - def match_seq(self, nodes): - # We only match an empty sequence of nodes in its entirety - return len(nodes) == 0 - - def generate_matches(self, nodes): - if self.content is None: - # Return a match if there is an empty sequence - if len(nodes) == 0: - yield 0, {} - else: - # Return a match if the argument pattern has no matches - for c, r in self.content.generate_matches(nodes): - return - yield 0, {} - - -def generate_matches(patterns, nodes): - """ - Generator yielding matches for a sequence of patterns and nodes. - - Args: - patterns: a sequence of patterns - nodes: a sequence of nodes - - Yields: - (count, results) tuples where: - count: the entire sequence of patterns matches nodes[:count]; - results: dict containing named submatches. 
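At module level, generate_matches() chains a list of patterns across a sequence of nodes, merging the per-pattern result dicts as it goes. A sketch (stdlib lib2to3):

    from lib2to3.pytree import Leaf, LeafPattern, generate_matches
    from lib2to3.pgen2 import token

    nodes = [Leaf(token.NAME, "a"), Leaf(token.EQUAL, "="), Leaf(token.NUMBER, "1")]
    pats = [LeafPattern(token.NAME), LeafPattern(token.EQUAL), LeafPattern(token.NUMBER)]
    for count, results in generate_matches(pats, nodes):
        print(count)  # -> 3: the pattern sequence consumes all three nodes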
- """ - if not patterns: - yield 0, {} - else: - p, rest = patterns[0], patterns[1:] - for c0, r0 in p.generate_matches(nodes): - if not rest: - yield c0, r0 - else: - for c1, r1 in generate_matches(rest, nodes[c0:]): - r = {} - r.update(r0) - r.update(r1) - yield c0 + c1, r diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/refactor.py b/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/refactor.py deleted file mode 100644 index a4c168df..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/lib2to3/lib2to3/refactor.py +++ /dev/null @@ -1,747 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Refactoring framework. - -Used as a main program, this can refactor any number of files and/or -recursively descend down directories. Imported as a module, this -provides infrastructure to write your own refactoring tool. -""" - -from __future__ import with_statement - -__author__ = "Guido van Rossum " - - -# Python imports -import os -import sys -import logging -import operator -import collections -import StringIO -from itertools import chain - -# Local imports -from .pgen2 import driver, tokenize, token -from .fixer_util import find_root -from . import pytree, pygram -from . import btm_utils as bu -from . import btm_matcher as bm - - -def get_all_fix_names(fixer_pkg, remove_prefix=True): - """Return a sorted list of all available fix names in the given package.""" - pkg = __import__(fixer_pkg, [], [], ["*"]) - fixer_dir = os.path.dirname(pkg.__file__) - fix_names = [] - for name in sorted(os.listdir(fixer_dir)): - if name.startswith("fix_") and name.endswith(".py"): - if remove_prefix: - name = name[4:] - fix_names.append(name[:-3]) - return fix_names - - -class _EveryNode(Exception): - pass - - -def _get_head_types(pat): - """ Accepts a pytree Pattern Node and returns a set - of the pattern types which will match first. """ - - if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)): - # NodePatters must either have no type and no content - # or a type and content -- so they don't get any farther - # Always return leafs - if pat.type is None: - raise _EveryNode - return set([pat.type]) - - if isinstance(pat, pytree.NegatedPattern): - if pat.content: - return _get_head_types(pat.content) - raise _EveryNode # Negated Patterns don't have a type - - if isinstance(pat, pytree.WildcardPattern): - # Recurse on each node in content - r = set() - for p in pat.content: - for x in p: - r.update(_get_head_types(x)) - return r - - raise Exception("Oh no! I don't understand pattern %s" %(pat)) - - -def _get_headnode_dict(fixer_list): - """ Accepts a list of fixers and returns a dictionary - of head node type --> fixer list. """ - head_nodes = collections.defaultdict(list) - every = [] - for fixer in fixer_list: - if fixer.pattern: - try: - heads = _get_head_types(fixer.pattern) - except _EveryNode: - every.append(fixer) - else: - for node_type in heads: - head_nodes[node_type].append(fixer) - else: - if fixer._accept_type is not None: - head_nodes[fixer._accept_type].append(fixer) - else: - every.append(fixer) - for node_type in chain(pygram.python_grammar.symbol2number.itervalues(), - pygram.python_grammar.tokens): - head_nodes[node_type].extend(every) - return dict(head_nodes) - - -def get_fixers_from_package(pkg_name): - """ - Return the fully qualified names for fixers in the package pkg_name. - """ - return [pkg_name + "." 
+ fix_name - for fix_name in get_all_fix_names(pkg_name, False)] - -def _identity(obj): - return obj - -if sys.version_info < (3, 0): - import codecs - _open_with_encoding = codecs.open - # codecs.open doesn't translate newlines sadly. - def _from_system_newlines(input): - return input.replace(u"\r\n", u"\n") - def _to_system_newlines(input): - if os.linesep != "\n": - return input.replace(u"\n", os.linesep) - else: - return input -else: - _open_with_encoding = open - _from_system_newlines = _identity - _to_system_newlines = _identity - - -def _detect_future_features(source): - have_docstring = False - gen = tokenize.generate_tokens(StringIO.StringIO(source).readline) - def advance(): - tok = gen.next() - return tok[0], tok[1] - ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT)) - features = set() - try: - while True: - tp, value = advance() - if tp in ignore: - continue - elif tp == token.STRING: - if have_docstring: - break - have_docstring = True - elif tp == token.NAME and value == u"from": - tp, value = advance() - if tp != token.NAME or value != u"__future__": - break - tp, value = advance() - if tp != token.NAME or value != u"import": - break - tp, value = advance() - if tp == token.OP and value == u"(": - tp, value = advance() - while tp == token.NAME: - features.add(value) - tp, value = advance() - if tp != token.OP or value != u",": - break - tp, value = advance() - else: - break - except StopIteration: - pass - return frozenset(features) - - -class FixerError(Exception): - """A fixer could not be loaded.""" - - -class RefactoringTool(object): - - _default_options = {"print_function" : False, - "write_unchanged_files" : False} - - CLASS_PREFIX = "Fix" # The prefix for fixer classes - FILE_PREFIX = "fix_" # The prefix for modules with a fixer within - - def __init__(self, fixer_names, options=None, explicit=None): - """Initializer. - - Args: - fixer_names: a list of fixers to import - options: a dict with configuration. - explicit: a list of fixers to run even if they are marked as explicit. - """ - self.fixers = fixer_names - self.explicit = explicit or [] - self.options = self._default_options.copy() - if options is not None: - self.options.update(options) - if self.options["print_function"]: - self.grammar = pygram.python_grammar_no_print_statement - else: - self.grammar = pygram.python_grammar - # When this is True, the refactor*() methods will call write_file() for - # files processed even if they were not changed during refactoring. If - # and only if the refactor method's write parameter was True.
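_detect_future_features() tokenizes only the module header, stopping at the first statement that is neither a docstring nor a __future__ import; that is what decides whether the print-statement grammar can be used. A sketch against the stdlib module (the leading underscore marks it private, so this is illustration, not API):

    from lib2to3.refactor import _detect_future_features

    src = "from __future__ import (print_function, unicode_literals)\nx = 1\n"
    print(sorted(_detect_future_features(src)))
    # -> ['print_function', 'unicode_literals']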
- self.write_unchanged_files = self.options.get("write_unchanged_files") - self.errors = [] - self.logger = logging.getLogger("RefactoringTool") - self.fixer_log = [] - self.wrote = False - self.driver = driver.Driver(self.grammar, - convert=pytree.convert, - logger=self.logger) - self.pre_order, self.post_order = self.get_fixers() - - - self.files = [] # List of files that were or should be modified - - self.BM = bm.BottomMatcher() - self.bmi_pre_order = [] # Bottom Matcher incompatible fixers - self.bmi_post_order = [] - - for fixer in chain(self.post_order, self.pre_order): - if fixer.BM_compatible: - self.BM.add_fixer(fixer) - # remove fixers that will be handled by the bottom-up - # matcher - elif fixer in self.pre_order: - self.bmi_pre_order.append(fixer) - elif fixer in self.post_order: - self.bmi_post_order.append(fixer) - - self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order) - self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order) - - - - def get_fixers(self): - """Inspects the options to load the requested patterns and handlers. - - Returns: - (pre_order, post_order), where pre_order is the list of fixers that - want a pre-order AST traversal, and post_order is the list that want - post-order traversal. - """ - pre_order_fixers = [] - post_order_fixers = [] - for fix_mod_path in self.fixers: - mod = __import__(fix_mod_path, {}, {}, ["*"]) - fix_name = fix_mod_path.rsplit(".", 1)[-1] - if fix_name.startswith(self.FILE_PREFIX): - fix_name = fix_name[len(self.FILE_PREFIX):] - parts = fix_name.split("_") - class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts]) - try: - fix_class = getattr(mod, class_name) - except AttributeError: - raise FixerError("Can't find %s.%s" % (fix_name, class_name)) - fixer = fix_class(self.options, self.fixer_log) - if fixer.explicit and self.explicit is not True and \ - fix_mod_path not in self.explicit: - self.log_message("Skipping implicit fixer: %s", fix_name) - continue - - self.log_debug("Adding transformation: %s", fix_name) - if fixer.order == "pre": - pre_order_fixers.append(fixer) - elif fixer.order == "post": - post_order_fixers.append(fixer) - else: - raise FixerError("Illegal fixer order: %r" % fixer.order) - - key_func = operator.attrgetter("run_order") - pre_order_fixers.sort(key=key_func) - post_order_fixers.sort(key=key_func) - return (pre_order_fixers, post_order_fixers) - - def log_error(self, msg, *args, **kwds): - """Called when an error occurs.""" - raise - - def log_message(self, msg, *args): - """Hook to log a message.""" - if args: - msg = msg % args - self.logger.info(msg) - - def log_debug(self, msg, *args): - if args: - msg = msg % args - self.logger.debug(msg) - - def print_output(self, old_text, new_text, filename, equal): - """Called with the old version, new version, and filename of a - refactored file.""" - pass - - def refactor(self, items, write=False, doctests_only=False): - """Refactor a list of files and directories.""" - - for dir_or_file in items: - if os.path.isdir(dir_or_file): - self.refactor_dir(dir_or_file, write, doctests_only) - else: - self.refactor_file(dir_or_file, write, doctests_only) - - def refactor_dir(self, dir_name, write=False, doctests_only=False): - """Descends down a directory and refactors every Python file found. - - Python files are assumed to have a .py extension. - - Files and subdirectories starting with '.' are skipped.
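End to end, a RefactoringTool is driven either through refactor() on paths or directly on strings. A minimal sketch using the stdlib lib2to3 and its bundled fixers (works through Python 3.12; "<example>" is just a display name):

    from lib2to3.refactor import RefactoringTool, get_fixers_from_package

    rt = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
    tree = rt.refactor_string("print 'hi'\n", "<example>")
    print(str(tree), end="")  # -> print('hi')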
- """ - py_ext = os.extsep + "py" - for dirpath, dirnames, filenames in os.walk(dir_name): - self.log_debug("Descending into %s", dirpath) - dirnames.sort() - filenames.sort() - for name in filenames: - if (not name.startswith(".") and - os.path.splitext(name)[1] == py_ext): - fullname = os.path.join(dirpath, name) - self.refactor_file(fullname, write, doctests_only) - # Modify dirnames in-place to remove subdirs with leading dots - dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] - - def _read_python_source(self, filename): - """ - Do our best to decode a Python source file correctly. - """ - try: - f = open(filename, "rb") - except IOError as err: - self.log_error("Can't open %s: %s", filename, err) - return None, None - try: - encoding = tokenize.detect_encoding(f.readline)[0] - finally: - f.close() - with _open_with_encoding(filename, "r", encoding=encoding) as f: - return _from_system_newlines(f.read()), encoding - - def refactor_file(self, filename, write=False, doctests_only=False): - """Refactors a file.""" - input, encoding = self._read_python_source(filename) - if input is None: - # Reading the file failed. - return - input += u"\n" # Silence certain parse errors - if doctests_only: - self.log_debug("Refactoring doctests in %s", filename) - output = self.refactor_docstring(input, filename) - if self.write_unchanged_files or output != input: - self.processed_file(output, filename, input, write, encoding) - else: - self.log_debug("No doctest changes in %s", filename) - else: - tree = self.refactor_string(input, filename) - if self.write_unchanged_files or (tree and tree.was_changed): - # The [:-1] is to take off the \n we added earlier - self.processed_file(unicode(tree)[:-1], filename, - write=write, encoding=encoding) - else: - self.log_debug("No changes in %s", filename) - - def refactor_string(self, data, name): - """Refactor a given input string. - - Args: - data: a string holding the code to be refactored. - name: a human-readable name for use in error/log messages. - - Returns: - An AST corresponding to the refactored input stream; None if - there were errors during the parse. - """ - features = _detect_future_features(data) - if "print_function" in features: - self.driver.grammar = pygram.python_grammar_no_print_statement - try: - tree = self.driver.parse_string(data) - except Exception as err: - self.log_error("Can't parse %s: %s: %s", - name, err.__class__.__name__, err) - return - finally: - self.driver.grammar = self.grammar - tree.future_features = features - self.log_debug("Refactoring %s", name) - self.refactor_tree(tree, name) - return tree - - def refactor_stdin(self, doctests_only=False): - input = sys.stdin.read() - if doctests_only: - self.log_debug("Refactoring doctests in stdin") - output = self.refactor_docstring(input, "") - if self.write_unchanged_files or output != input: - self.processed_file(output, "", input) - else: - self.log_debug("No doctest changes in stdin") - else: - tree = self.refactor_string(input, "") - if self.write_unchanged_files or (tree and tree.was_changed): - self.processed_file(unicode(tree), "", input) - else: - self.log_debug("No changes in stdin") - - def refactor_tree(self, tree, name): - """Refactors a parse tree (modifying the tree in place). - - For compatible patterns the bottom matcher module is - used. Otherwise the tree is traversed node-to-node for - matches. - - Args: - tree: a pytree.Node instance representing the root of the tree - to be refactored. - name: a human-readable name for this tree. 
- - Returns: - True if the tree was modified, False otherwise. - """ - - for fixer in chain(self.pre_order, self.post_order): - fixer.start_tree(tree, name) - - #use traditional matching for the incompatible fixers - self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) - self.traverse_by(self.bmi_post_order_heads, tree.post_order()) - - # obtain a set of candidate nodes - match_set = self.BM.run(tree.leaves()) - - while any(match_set.values()): - for fixer in self.BM.fixers: - if fixer in match_set and match_set[fixer]: - #sort by depth; apply fixers from bottom(of the AST) to top - match_set[fixer].sort(key=pytree.Base.depth, reverse=True) - - if fixer.keep_line_order: - #some fixers(eg fix_imports) must be applied - #with the original file's line order - match_set[fixer].sort(key=pytree.Base.get_lineno) - - for node in list(match_set[fixer]): - if node in match_set[fixer]: - match_set[fixer].remove(node) - - try: - find_root(node) - except ValueError: - # this node has been cut off from a - # previous transformation ; skip - continue - - if node.fixers_applied and fixer in node.fixers_applied: - # do not apply the same fixer again - continue - - results = fixer.match(node) - - if results: - new = fixer.transform(node, results) - if new is not None: - node.replace(new) - #new.fixers_applied.append(fixer) - for node in new.post_order(): - # do not apply the fixer again to - # this or any subnode - if not node.fixers_applied: - node.fixers_applied = [] - node.fixers_applied.append(fixer) - - # update the original match set for - # the added code - new_matches = self.BM.run(new.leaves()) - for fxr in new_matches: - if not fxr in match_set: - match_set[fxr]=[] - - match_set[fxr].extend(new_matches[fxr]) - - for fixer in chain(self.pre_order, self.post_order): - fixer.finish_tree(tree, name) - return tree.was_changed - - def traverse_by(self, fixers, traversal): - """Traverse an AST, applying a set of fixers to each node. - - This is a helper method for refactor_tree(). - - Args: - fixers: a list of fixer instances. - traversal: a generator that yields AST nodes. - - Returns: - None - """ - if not fixers: - return - for node in traversal: - for fixer in fixers[node.type]: - results = fixer.match(node) - if results: - new = fixer.transform(node, results) - if new is not None: - node.replace(new) - node = new - - def processed_file(self, new_text, filename, old_text=None, write=False, - encoding=None): - """ - Called when a file has been refactored and there may be changes. - """ - self.files.append(filename) - if old_text is None: - old_text = self._read_python_source(filename)[0] - if old_text is None: - return - equal = old_text == new_text - self.print_output(old_text, new_text, filename, equal) - if equal: - self.log_debug("No changes to %s", filename) - if not self.write_unchanged_files: - return - if write: - self.write_file(new_text, filename, old_text, encoding) - else: - self.log_debug("Not writing changes to %s", filename) - - def write_file(self, new_text, filename, old_text, encoding=None): - """Writes a string to a file. - - It first shows a unified diff between the old text and the new text, and - then rewrites the file; the latter is only done if the write option is - set. 
- """ - try: - f = _open_with_encoding(filename, "w", encoding=encoding) - except os.error as err: - self.log_error("Can't create %s: %s", filename, err) - return - try: - f.write(_to_system_newlines(new_text)) - except os.error as err: - self.log_error("Can't write %s: %s", filename, err) - finally: - f.close() - self.log_debug("Wrote changes to %s", filename) - self.wrote = True - - PS1 = ">>> " - PS2 = "... " - - def refactor_docstring(self, input, filename): - """Refactors a docstring, looking for doctests. - - This returns a modified version of the input string. It looks - for doctests, which start with a ">>>" prompt, and may be - continued with "..." prompts, as long as the "..." is indented - the same as the ">>>". - - (Unfortunately we can't use the doctest module's parser, - since, like most parsers, it is not geared towards preserving - the original source.) - """ - result = [] - block = None - block_lineno = None - indent = None - lineno = 0 - for line in input.splitlines(True): - lineno += 1 - if line.lstrip().startswith(self.PS1): - if block is not None: - result.extend(self.refactor_doctest(block, block_lineno, - indent, filename)) - block_lineno = lineno - block = [line] - i = line.find(self.PS1) - indent = line[:i] - elif (indent is not None and - (line.startswith(indent + self.PS2) or - line == indent + self.PS2.rstrip() + u"\n")): - block.append(line) - else: - if block is not None: - result.extend(self.refactor_doctest(block, block_lineno, - indent, filename)) - block = None - indent = None - result.append(line) - if block is not None: - result.extend(self.refactor_doctest(block, block_lineno, - indent, filename)) - return u"".join(result) - - def refactor_doctest(self, block, lineno, indent, filename): - """Refactors one doctest. - - A doctest is given as a block of lines, the first of which starts - with ">>>" (possibly indented), while the remaining lines start - with "..." (identically indented). - - """ - try: - tree = self.parse_block(block, lineno, indent) - except Exception as err: - if self.logger.isEnabledFor(logging.DEBUG): - for line in block: - self.log_debug("Source: %s", line.rstrip(u"\n")) - self.log_error("Can't parse docstring in %s line %s: %s: %s", - filename, lineno, err.__class__.__name__, err) - return block - if self.refactor_tree(tree, filename): - new = unicode(tree).splitlines(True) - # Undo the adjustment of the line numbers in wrap_toks() below. - clipped, new = new[:lineno-1], new[lineno-1:] - assert clipped == [u"\n"] * (lineno-1), clipped - if not new[-1].endswith(u"\n"): - new[-1] += u"\n" - block = [indent + self.PS1 + new.pop(0)] - if new: - block += [indent + self.PS2 + line for line in new] - return block - - def summarize(self): - if self.wrote: - were = "were" - else: - were = "need to be" - if not self.files: - self.log_message("No files %s modified.", were) - else: - self.log_message("Files that %s modified:", were) - for file in self.files: - self.log_message(file) - if self.fixer_log: - self.log_message("Warnings/messages while refactoring:") - for message in self.fixer_log: - self.log_message(message) - if self.errors: - if len(self.errors) == 1: - self.log_message("There was 1 error:") - else: - self.log_message("There were %d errors:", len(self.errors)) - for msg, args, kwds in self.errors: - self.log_message(msg, *args, **kwds) - - def parse_block(self, block, lineno, indent): - """Parses a block into a tree. 
- - This is necessary to get correct line number / offset information - in the parser diagnostics and embedded into the parse tree. - """ - tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent)) - tree.future_features = frozenset() - return tree - - def wrap_toks(self, block, lineno, indent): - """Wraps a tokenize stream to systematically modify start/end.""" - tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next) - for type, value, (line0, col0), (line1, col1), line_text in tokens: - line0 += lineno - 1 - line1 += lineno - 1 - # Don't bother updating the columns; this is too complicated - # since line_text would also have to be updated and it would - # still break for tokens spanning lines. Let the user guess - # that the column numbers for doctests are relative to the - # end of the prompt string (PS1 or PS2). - yield type, value, (line0, col0), (line1, col1), line_text - - - def gen_lines(self, block, indent): - """Generates lines as expected by tokenize from a list of lines. - - This strips the first len(indent + self.PS1) characters off each line. - """ - prefix1 = indent + self.PS1 - prefix2 = indent + self.PS2 - prefix = prefix1 - for line in block: - if line.startswith(prefix): - yield line[len(prefix):] - elif line == prefix.rstrip() + u"\n": - yield u"\n" - else: - raise AssertionError("line=%r, prefix=%r" % (line, prefix)) - prefix = prefix2 - while True: - yield "" - - -class MultiprocessingUnsupported(Exception): - pass - - -class MultiprocessRefactoringTool(RefactoringTool): - - def __init__(self, *args, **kwargs): - super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs) - self.queue = None - self.output_lock = None - - def refactor(self, items, write=False, doctests_only=False, - num_processes=1): - if num_processes == 1: - return super(MultiprocessRefactoringTool, self).refactor( - items, write, doctests_only) - try: - import multiprocessing - except ImportError: - raise MultiprocessingUnsupported - if self.queue is not None: - raise RuntimeError("already doing multiple processes") - self.queue = multiprocessing.JoinableQueue() - self.output_lock = multiprocessing.Lock() - processes = [multiprocessing.Process(target=self._child) - for i in xrange(num_processes)] - try: - for p in processes: - p.start() - super(MultiprocessRefactoringTool, self).refactor(items, write, - doctests_only) - finally: - self.queue.join() - for i in xrange(num_processes): - self.queue.put(None) - for p in processes: - if p.is_alive(): - p.join() - self.queue = None - - def _child(self): - task = self.queue.get() - while task is not None: - args, kwargs = task - try: - super(MultiprocessRefactoringTool, self).refactor_file( - *args, **kwargs) - finally: - self.queue.task_done() - task = self.queue.get() - - def refactor_file(self, *args, **kwargs): - if self.queue is not None: - self.queue.put((args, kwargs)) - else: - return super(MultiprocessRefactoringTool, self).refactor_file( - *args, **kwargs) diff --git a/src/debugpy/_vendored/pydevd/third_party/pep8/pycodestyle.py b/src/debugpy/_vendored/pydevd/third_party/pep8/pycodestyle.py deleted file mode 100644 index a4b11fe6..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/pep8/pycodestyle.py +++ /dev/null @@ -1,2325 +0,0 @@ -#!/usr/bin/env python -# pycodestyle.py - Check Python source code formatting, according to PEP 8 -# -# Copyright (C) 2006-2009 Johann C. 
Rocholl -# Copyright (C) 2009-2014 Florent Xicluna -# Copyright (C) 2014-2016 Ian Lee -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, merge, -# publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, -# subject to the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -r""" -Check Python source code formatting, according to PEP 8. - -For usage and a list of options, try this: -$ python pycodestyle.py -h - -This program and its regression test suite live here: -https://github.com/pycqa/pycodestyle - -Groups of errors and warnings: -E errors -W warnings -100 indentation -200 whitespace -300 blank lines -400 imports -500 line length -600 deprecation -700 statements -900 syntax error -""" -from __future__ import with_statement - -import inspect -import keyword -import os -import re -import sys -import time -import tokenize -import warnings - -from fnmatch import fnmatch -from optparse import OptionParser - -try: - from configparser import RawConfigParser - from io import TextIOWrapper -except ImportError: - from ConfigParser import RawConfigParser - -__version__ = '2.3.1' - -DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' -DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503' -try: - if sys.platform == 'win32': - USER_CONFIG = os.path.expanduser(r'~\.pycodestyle') - else: - USER_CONFIG = os.path.join( - os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), - 'pycodestyle' - ) -except ImportError: - USER_CONFIG = None - -PROJECT_CONFIG = ('setup.cfg', 'tox.ini') -TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') -MAX_LINE_LENGTH = 79 -REPORT_FORMAT = { - 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', - 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', -} - -PyCF_ONLY_AST = 1024 -SINGLETONS = frozenset(['False', 'None', 'True']) -KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS -UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) -ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-']) -WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%']) -WS_NEEDED_OPERATORS = frozenset([ - '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>', - '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '=']) -WHITESPACE = frozenset(' \t') -NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE]) -SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT]) -# ERRORTOKEN is triggered by backticks in Python 3 -SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN]) -BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines'] - -INDENT_REGEX = re.compile(r'([ \t]*)') -RAISE_COMMA_REGEX = 
re.compile(r'raise\s+\w+\s*,') -RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$') -ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b') -DOCSTRING_REGEX = re.compile(r'u?r?["\']') -EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') -WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') -COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)' - r'\s*(?(1)|(None|False|True))\b') -COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s') -COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type' - r'|\s*\(\s*([^)]*[^ )])\s*\))') -KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS)) -OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)') -LAMBDA_REGEX = re.compile(r'\blambda\b') -HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$') -STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)') -STARTSWITH_TOP_LEVEL_REGEX = re.compile(r'^(async\s+def\s+|def\s+|class\s+|@)') -STARTSWITH_INDENT_STATEMENT_REGEX = re.compile( - r'^\s*({0})'.format('|'.join(s.replace(' ', '\s+') for s in ( - 'def', 'async def', - 'for', 'async for', - 'if', 'elif', 'else', - 'try', 'except', 'finally', - 'with', 'async with', - 'class', - 'while', - ))) -) -DUNDER_REGEX = re.compile(r'^__([^\s]+)__ = ') - -# Work around Python < 2.6 behaviour, which does not generate NL after -# a comment which is on a line by itself. -COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n' - - -############################################################################## -# Plugins (check functions) for physical lines -############################################################################## - - -def tabs_or_spaces(physical_line, indent_char): - r"""Never mix tabs and spaces. - - The most popular way of indenting Python is with spaces only. The - second-most popular way is with tabs only. Code indented with a mixture - of tabs and spaces should be converted to using spaces exclusively. When - invoking the Python command line interpreter with the -t option, it issues - warnings about code that illegally mixes tabs and spaces. When using -tt - these warnings become errors. These options are highly recommended! - - Okay: if a == 0:\n a = 1\n b = 1 - E101: if a == 0:\n a = 1\n\tb = 1 - """ - indent = INDENT_REGEX.match(physical_line).group(1) - for offset, char in enumerate(indent): - if char != indent_char: - return offset, "E101 indentation contains mixed spaces and tabs" - - -def tabs_obsolete(physical_line): - r"""For new projects, spaces-only are strongly recommended over tabs. - - Okay: if True:\n return - W191: if True:\n\treturn - """ - indent = INDENT_REGEX.match(physical_line).group(1) - if '\t' in indent: - return indent.index('\t'), "W191 indentation contains tabs" - - -def trailing_whitespace(physical_line): - r"""Trailing whitespace is superfluous. - - The warning returned varies on whether the line itself is blank, for easier - filtering for those who want to indent their blank lines. 
- - Okay: spam(1)\n# - W291: spam(1) \n# - W293: class Foo(object):\n \n bang = 12 - """ - physical_line = physical_line.rstrip('\n') # chr(10), newline - physical_line = physical_line.rstrip('\r') # chr(13), carriage return - physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L - stripped = physical_line.rstrip(' \t\v') - if physical_line != stripped: - if stripped: - return len(stripped), "W291 trailing whitespace" - else: - return 0, "W293 blank line contains whitespace" - - -def trailing_blank_lines(physical_line, lines, line_number, total_lines): - r"""Trailing blank lines are superfluous. - - Okay: spam(1) - W391: spam(1)\n - - However the last line should end with a new line (warning W292). - """ - if line_number == total_lines: - stripped_last_line = physical_line.rstrip() - if not stripped_last_line: - return 0, "W391 blank line at end of file" - if stripped_last_line == physical_line: - return len(physical_line), "W292 no newline at end of file" - - -def maximum_line_length(physical_line, max_line_length, multiline, noqa): - r"""Limit all lines to a maximum of 79 characters. - - There are still many devices around that are limited to 80 character - lines; plus, limiting windows to 80 characters makes it possible to have - several windows side-by-side. The default wrapping on such devices looks - ugly. Therefore, please limit all lines to a maximum of 79 characters. - For flowing long blocks of text (docstrings or comments), limiting the - length to 72 characters is recommended. - - Reports error E501. - """ - line = physical_line.rstrip() - length = len(line) - if length > max_line_length and not noqa: - # Special case for long URLs in multi-line docstrings or comments, - # but still report the error when the 72 first chars are whitespaces. - chunks = line.split() - if ((len(chunks) == 1 and multiline) or - (len(chunks) == 2 and chunks[0] == '#')) and \ - len(line) - len(chunks[-1]) < max_line_length - 7: - return - if hasattr(line, 'decode'): # Python 2 - # The line could contain multi-byte characters - try: - length = len(line.decode('utf-8')) - except UnicodeError: - pass - if length > max_line_length: - return (max_line_length, "E501 line too long " - "(%d > %d characters)" % (length, max_line_length)) - - -############################################################################## -# Plugins (check functions) for logical lines -############################################################################## - - -def blank_lines(logical_line, blank_lines, indent_level, line_number, - blank_before, previous_logical, - previous_unindented_logical_line, previous_indent_level, - lines): - r"""Separate top-level function and class definitions with two blank lines. - - Method definitions inside a class are separated by a single blank line. - - Extra blank lines may be used (sparingly) to separate groups of related - functions. Blank lines may be omitted between a bunch of related - one-liners (e.g. a set of dummy implementations). - - Use blank lines in functions, sparingly, to indicate logical sections. 
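These physical-line checks are plain functions taking the raw line and returning an (offset, message) tuple, which makes them easy to exercise in isolation. The same function still ships in the standalone pycodestyle package on PyPI; a sketch assuming that package is installed:

    import pycodestyle

    print(pycodestyle.trailing_whitespace("spam(1) \n"))
    # -> (7, 'W291 trailing whitespace')
    print(pycodestyle.trailing_whitespace("spam(1)\n"))
    # -> None: nothing to report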
- - Okay: def a():\n pass\n\n\ndef b():\n pass - Okay: def a():\n pass\n\n\nasync def b():\n pass - Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass - Okay: default = 1\nfoo = 1 - Okay: classify = 1\nfoo = 1 - - E301: class Foo:\n b = 0\n def bar():\n pass - E302: def a():\n pass\n\ndef b(n):\n pass - E302: def a():\n pass\n\nasync def b(n):\n pass - E303: def a():\n pass\n\n\n\ndef b(n):\n pass - E303: def a():\n\n\n\n pass - E304: @decorator\n\ndef a():\n pass - E305: def a():\n pass\na() - E306: def a():\n def b():\n pass\n def c():\n pass - """ - if line_number < 3 and not previous_logical: - return # Don't expect blank lines before the first line - if previous_logical.startswith('@'): - if blank_lines: - yield 0, "E304 blank lines found after function decorator" - elif blank_lines > 2 or (indent_level and blank_lines == 2): - yield 0, "E303 too many blank lines (%d)" % blank_lines - elif STARTSWITH_TOP_LEVEL_REGEX.match(logical_line): - if indent_level: - if not (blank_before or previous_indent_level < indent_level or - DOCSTRING_REGEX.match(previous_logical)): - ancestor_level = indent_level - nested = False - # Search backwards for a def ancestor or tree root (top level). - for line in lines[line_number - 2::-1]: - if line.strip() and expand_indent(line) < ancestor_level: - ancestor_level = expand_indent(line) - nested = line.lstrip().startswith('def ') - if nested or ancestor_level == 0: - break - if nested: - yield 0, "E306 expected 1 blank line before a " \ - "nested definition, found 0" - else: - yield 0, "E301 expected 1 blank line, found 0" - elif blank_before != 2: - yield 0, "E302 expected 2 blank lines, found %d" % blank_before - elif (logical_line and not indent_level and blank_before != 2 and - previous_unindented_logical_line.startswith(('def ', 'class '))): - yield 0, "E305 expected 2 blank lines after " \ - "class or function definition, found %d" % blank_before - - -def extraneous_whitespace(logical_line): - r"""Avoid extraneous whitespace. - - Avoid extraneous whitespace in these situations: - - Immediately inside parentheses, brackets or braces. - - Immediately before a comma, semicolon, or colon. - - Okay: spam(ham[1], {eggs: 2}) - E201: spam( ham[1], {eggs: 2}) - E201: spam(ham[ 1], {eggs: 2}) - E201: spam(ham[1], { eggs: 2}) - E202: spam(ham[1], {eggs: 2} ) - E202: spam(ham[1 ], {eggs: 2}) - E202: spam(ham[1], {eggs: 2 }) - - E203: if x == 4: print x, y; x, y = y , x - E203: if x == 4: print x, y ; x, y = y, x - E203: if x == 4 : print x, y; x, y = y, x - """ - line = logical_line - for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): - text = match.group() - char = text.strip() - found = match.start() - if text == char + ' ': - # assert char in '([{' - yield found + 1, "E201 whitespace after '%s'" % char - elif line[found - 1] != ',': - code = ('E202' if char in '}])' else 'E203') # if char in ',;:' - yield found, "%s whitespace before '%s'" % (code, char) - - -def whitespace_around_keywords(logical_line): - r"""Avoid extraneous whitespace around keywords. 
- - Okay: True and False - E271: True and False - E272: True and False - E273: True and\tFalse - E274: True\tand False - """ - for match in KEYWORD_REGEX.finditer(logical_line): - before, after = match.groups() - - if '\t' in before: - yield match.start(1), "E274 tab before keyword" - elif len(before) > 1: - yield match.start(1), "E272 multiple spaces before keyword" - - if '\t' in after: - yield match.start(2), "E273 tab after keyword" - elif len(after) > 1: - yield match.start(2), "E271 multiple spaces after keyword" - - -def missing_whitespace_after_import_keyword(logical_line): - r"""Multiple imports in form from x import (a, b, c) should have space - between import statement and parenthesised name list. - - Okay: from foo import (bar, baz) - E275: from foo import(bar, baz) - E275: from importable.module import(bar, baz) - """ - line = logical_line - indicator = ' import(' - if line.startswith('from '): - found = line.find(indicator) - if -1 < found: - pos = found + len(indicator) - 1 - yield pos, "E275 missing whitespace after keyword" - - -def missing_whitespace(logical_line): - r"""Each comma, semicolon or colon should be followed by whitespace. - - Okay: [a, b] - Okay: (3,) - Okay: a[1:4] - Okay: a[:4] - Okay: a[1:] - Okay: a[1:4:2] - E231: ['a','b'] - E231: foo(bar,baz) - E231: [{'a':'b'}] - """ - line = logical_line - for index in range(len(line) - 1): - char = line[index] - if char in ',;:' and line[index + 1] not in WHITESPACE: - before = line[:index] - if char == ':' and before.count('[') > before.count(']') and \ - before.rfind('{') < before.rfind('['): - continue # Slice syntax, no space required - if char == ',' and line[index + 1] == ')': - continue # Allow tuple with only one element: (3,) - yield index, "E231 missing whitespace after '%s'" % char - - -def indentation(logical_line, previous_logical, indent_char, - indent_level, previous_indent_level): - r"""Use 4 spaces per indentation level. - - For really old code that you don't want to mess up, you can continue to - use 8-space tabs. - - Okay: a = 1 - Okay: if a == 0:\n a = 1 - E111: a = 1 - E114: # a = 1 - - Okay: for item in items:\n pass - E112: for item in items:\npass - E115: for item in items:\n# Hi\n pass - - Okay: a = 1\nb = 2 - E113: a = 1\n b = 2 - E116: a = 1\n # b = 2 - """ - c = 0 if logical_line else 3 - tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)" - if indent_level % 4: - yield 0, tmpl % (1 + c, "indentation is not a multiple of four") - indent_expect = previous_logical.endswith(':') - if indent_expect and indent_level <= previous_indent_level: - yield 0, tmpl % (2 + c, "expected an indented block") - elif not indent_expect and indent_level > previous_indent_level: - yield 0, tmpl % (3 + c, "unexpected indentation") - - -def continued_indentation(logical_line, tokens, indent_level, hang_closing, - indent_char, noqa, verbose): - r"""Continuation lines indentation. - - Continuation lines should align wrapped elements either vertically - using Python's implicit line joining inside parentheses, brackets - and braces, or using a hanging indent. - - When using a hanging indent these considerations should be applied: - - there should be no arguments on the first line, and - - further indentation should be used to clearly distinguish itself as a - continuation line. 
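For whole-file checking these logical-line functions are orchestrated by a Checker, which tokenizes the source, builds logical lines, and reports each offence. A sketch against the standalone pycodestyle package ("<snippet>" is an illustrative name):

    import pycodestyle

    checker = pycodestyle.Checker(filename="<snippet>", lines=["x=1\n"])
    n = checker.check_all()  # prints <snippet>:1:2: E225 missing whitespace around operator
    print(n)  # -> 1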
- - Okay: a = (\n) - E123: a = (\n ) - - Okay: a = (\n 42) - E121: a = (\n 42) - E122: a = (\n42) - E123: a = (\n 42\n ) - E124: a = (24,\n 42\n) - E125: if (\n b):\n pass - E126: a = (\n 42) - E127: a = (24,\n 42) - E128: a = (24,\n 42) - E129: if (a or\n b):\n pass - E131: a = (\n 42\n 24) - """ - first_row = tokens[0][2][0] - nrows = 1 + tokens[-1][2][0] - first_row - if noqa or nrows == 1: - return - - # indent_next tells us whether the next block is indented; assuming - # that it is indented by 4 spaces, then we should not allow 4-space - # indents on the final continuation line; in turn, some other - # indents are allowed to have an extra 4 spaces. - indent_next = logical_line.endswith(':') - - row = depth = 0 - valid_hangs = (4,) if indent_char != '\t' else (4, 8) - # remember how many brackets were opened on each line - parens = [0] * nrows - # relative indents of physical lines - rel_indent = [0] * nrows - # for each depth, collect a list of opening rows - open_rows = [[0]] - # for each depth, memorize the hanging indentation - hangs = [None] - # visual indents - indent_chances = {} - last_indent = tokens[0][2] - visual_indent = None - last_token_multiline = False - # for each depth, memorize the visual indent column - indent = [last_indent[1]] - if verbose >= 3: - print(">>> " + tokens[0][4].rstrip()) - - for token_type, text, start, end, line in tokens: - - newline = row < start[0] - first_row - if newline: - row = start[0] - first_row - newline = not last_token_multiline and token_type not in NEWLINE - - if newline: - # this is the beginning of a continuation line. - last_indent = start - if verbose >= 3: - print("... " + line.rstrip()) - - # record the initial indent. - rel_indent[row] = expand_indent(line) - indent_level - - # identify closing bracket - close_bracket = (token_type == tokenize.OP and text in ']})') - - # is the indent relative to an opening bracket line? - for open_row in reversed(open_rows[depth]): - hang = rel_indent[row] - rel_indent[open_row] - hanging_indent = hang in valid_hangs - if hanging_indent: - break - if hangs[depth]: - hanging_indent = (hang == hangs[depth]) - # is there any chance of visual indent? 
- visual_indent = (not close_bracket and hang > 0 and - indent_chances.get(start[1])) - - if close_bracket and indent[depth]: - # closing bracket for visual indent - if start[1] != indent[depth]: - yield (start, "E124 closing bracket does not match " - "visual indentation") - elif close_bracket and not hang: - # closing bracket matches indentation of opening bracket's line - if hang_closing: - yield start, "E133 closing bracket is missing indentation" - elif indent[depth] and start[1] < indent[depth]: - if visual_indent is not True: - # visual indent is broken - yield (start, "E128 continuation line " - "under-indented for visual indent") - elif hanging_indent or (indent_next and rel_indent[row] == 8): - # hanging indent is verified - if close_bracket and not hang_closing: - yield (start, "E123 closing bracket does not match " - "indentation of opening bracket's line") - hangs[depth] = hang - elif visual_indent is True: - # visual indent is verified - indent[depth] = start[1] - elif visual_indent in (text, str): - # ignore token lined up with matching one from a previous line - pass - else: - # indent is broken - if hang <= 0: - error = "E122", "missing indentation or outdented" - elif indent[depth]: - error = "E127", "over-indented for visual indent" - elif not close_bracket and hangs[depth]: - error = "E131", "unaligned for hanging indent" - else: - hangs[depth] = hang - if hang > 4: - error = "E126", "over-indented for hanging indent" - else: - error = "E121", "under-indented for hanging indent" - yield start, "%s continuation line %s" % error - - # look for visual indenting - if (parens[row] and - token_type not in (tokenize.NL, tokenize.COMMENT) and - not indent[depth]): - indent[depth] = start[1] - indent_chances[start[1]] = True - if verbose >= 4: - print("bracket depth %s indent to %s" % (depth, start[1])) - # deal with implicit string concatenation - elif (token_type in (tokenize.STRING, tokenize.COMMENT) or - text in ('u', 'ur', 'b', 'br')): - indent_chances[start[1]] = str - # special case for the "if" statement because len("if (") == 4 - elif not indent_chances and not row and not depth and text == 'if': - indent_chances[end[1] + 1] = True - elif text == ':' and line[end[1]:].isspace(): - open_rows[depth].append(row) - - # keep track of bracket depth - if token_type == tokenize.OP: - if text in '([{': - depth += 1 - indent.append(0) - hangs.append(None) - if len(open_rows) == depth: - open_rows.append([]) - open_rows[depth].append(row) - parens[row] += 1 - if verbose >= 4: - print("bracket depth %s seen, col %s, visual min = %s" % - (depth, start[1], indent[depth])) - elif text in ')]}' and depth > 0: - # parent indents should not be more than this one - prev_indent = indent.pop() or last_indent[1] - hangs.pop() - for d in range(depth): - if indent[d] > prev_indent: - indent[d] = 0 - for ind in list(indent_chances): - if ind >= prev_indent: - del indent_chances[ind] - del open_rows[depth + 1:] - depth -= 1 - if depth: - indent_chances[indent[depth]] = True - for idx in range(row, -1, -1): - if parens[idx]: - parens[idx] -= 1 - break - assert len(indent) == depth + 1 - if start[1] not in indent_chances: - # allow lining up tokens - indent_chances[start[1]] = text - - last_token_multiline = (start[0] != end[0]) - if last_token_multiline: - rel_indent[end[0] - first_row] = rel_indent[row] - - if indent_next and expand_indent(line) == indent_level + 4: - pos = (start[0], indent[0] + 4) - if visual_indent: - code = "E129 visually indented line" - else: - code = "E125 continuation 
line" - yield pos, "%s with same indent as next logical line" % code - - -def whitespace_before_parameters(logical_line, tokens): - r"""Avoid extraneous whitespace. - - Avoid extraneous whitespace in the following situations: - - before the open parenthesis that starts the argument list of a - function call. - - before the open parenthesis that starts an indexing or slicing. - - Okay: spam(1) - E211: spam (1) - - Okay: dict['key'] = list[index] - E211: dict ['key'] = list[index] - E211: dict['key'] = list [index] - """ - prev_type, prev_text, __, prev_end, __ = tokens[0] - for index in range(1, len(tokens)): - token_type, text, start, end, __ = tokens[index] - if (token_type == tokenize.OP and - text in '([' and - start != prev_end and - (prev_type == tokenize.NAME or prev_text in '}])') and - # Syntax "class A (B):" is allowed, but avoid it - (index < 2 or tokens[index - 2][1] != 'class') and - # Allow "return (a.foo for a in range(5))" - not keyword.iskeyword(prev_text)): - yield prev_end, "E211 whitespace before '%s'" % text - prev_type = token_type - prev_text = text - prev_end = end - - -def whitespace_around_operator(logical_line): - r"""Avoid extraneous whitespace around an operator. - - Okay: a = 12 + 3 - E221: a = 4 + 5 - E222: a = 4 + 5 - E223: a = 4\t+ 5 - E224: a = 4 +\t5 - """ - for match in OPERATOR_REGEX.finditer(logical_line): - before, after = match.groups() - - if '\t' in before: - yield match.start(1), "E223 tab before operator" - elif len(before) > 1: - yield match.start(1), "E221 multiple spaces before operator" - - if '\t' in after: - yield match.start(2), "E224 tab after operator" - elif len(after) > 1: - yield match.start(2), "E222 multiple spaces after operator" - - -def missing_whitespace_around_operator(logical_line, tokens): - r"""Surround operators with a single space on either side. - - - Always surround these binary operators with a single space on - either side: assignment (=), augmented assignment (+=, -= etc.), - comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), - Booleans (and, or, not). - - - If operators with different priorities are used, consider adding - whitespace around the operators with the lowest priorities. 
- - Okay: i = i + 1 - Okay: submitted += 1 - Okay: x = x * 2 - 1 - Okay: hypot2 = x * x + y * y - Okay: c = (a + b) * (a - b) - Okay: foo(bar, key='word', *args, **kwargs) - Okay: alpha[:-i] - - E225: i=i+1 - E225: submitted +=1 - E225: x = x /2 - 1 - E225: z = x **y - E226: c = (a+b) * (a-b) - E226: hypot2 = x*x + y*y - E227: c = a|b - E228: msg = fmt%(errno, errmsg) - """ - parens = 0 - need_space = False - prev_type = tokenize.OP - prev_text = prev_end = None - for token_type, text, start, end, line in tokens: - if token_type in SKIP_COMMENTS: - continue - if text in ('(', 'lambda'): - parens += 1 - elif text == ')': - parens -= 1 - if need_space: - if start != prev_end: - # Found a (probably) needed space - if need_space is not True and not need_space[1]: - yield (need_space[0], - "E225 missing whitespace around operator") - need_space = False - elif text == '>' and prev_text in ('<', '-'): - # Tolerate the "<>" operator, even if running Python 3 - # Deal with Python 3's annotated return value "->" - pass - else: - if need_space is True or need_space[1]: - # A needed trailing space was not found - yield prev_end, "E225 missing whitespace around operator" - elif prev_text != '**': - code, optype = 'E226', 'arithmetic' - if prev_text == '%': - code, optype = 'E228', 'modulo' - elif prev_text not in ARITHMETIC_OP: - code, optype = 'E227', 'bitwise or shift' - yield (need_space[0], "%s missing whitespace " - "around %s operator" % (code, optype)) - need_space = False - elif token_type == tokenize.OP and prev_end is not None: - if text == '=' and parens: - # Allow keyword args or defaults: foo(bar=None). - pass - elif text in WS_NEEDED_OPERATORS: - need_space = True - elif text in UNARY_OPERATORS: - # Check if the operator is being used as a binary operator - # Allow unary operators: -123, -x, +1. - # Allow argument unpacking: foo(*args, **kwargs). - if (prev_text in '}])' if prev_type == tokenize.OP - else prev_text not in KEYWORDS): - need_space = None - elif text in WS_OPTIONAL_OPERATORS: - need_space = None - - if need_space is None: - # Surrounding space is optional, but ensure that - # trailing space matches opening space - need_space = (prev_end, start != prev_end) - elif need_space and start == prev_end: - # A needed opening space was not found - yield prev_end, "E225 missing whitespace around operator" - need_space = False - prev_type = token_type - prev_text = text - prev_end = end - - -def whitespace_around_comma(logical_line): - r"""Avoid extraneous whitespace after a comma or a colon. - - Note: these checks are disabled by default - - Okay: a = (1, 2) - E241: a = (1, 2) - E242: a = (1,\t2) - """ - line = logical_line - for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): - found = m.start() + 1 - if '\t' in m.group(): - yield found, "E242 tab after '%s'" % m.group()[0] - else: - yield found, "E241 multiple spaces after '%s'" % m.group()[0] - - -def whitespace_around_named_parameter_equals(logical_line, tokens): - r"""Don't use spaces around the '=' sign in function arguments. - - Don't use spaces around the '=' sign when used to indicate a - keyword argument or a default parameter value. 
- - Okay: def complex(real, imag=0.0): - Okay: return magic(r=real, i=imag) - Okay: boolean(a == b) - Okay: boolean(a != b) - Okay: boolean(a <= b) - Okay: boolean(a >= b) - Okay: def foo(arg: int = 42): - Okay: async def foo(arg: int = 42): - - E251: def complex(real, imag = 0.0): - E251: return magic(r = real, i = imag) - """ - parens = 0 - no_space = False - prev_end = None - annotated_func_arg = False - in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) - message = "E251 unexpected spaces around keyword / parameter equals" - for token_type, text, start, end, line in tokens: - if token_type == tokenize.NL: - continue - if no_space: - no_space = False - if start != prev_end: - yield (prev_end, message) - if token_type == tokenize.OP: - if text in '([': - parens += 1 - elif text in ')]': - parens -= 1 - elif in_def and text == ':' and parens == 1: - annotated_func_arg = True - elif parens and text == ',' and parens == 1: - annotated_func_arg = False - elif parens and text == '=' and not annotated_func_arg: - no_space = True - if start != prev_end: - yield (prev_end, message) - if not parens: - annotated_func_arg = False - - prev_end = end - - -def whitespace_before_comment(logical_line, tokens): - r"""Separate inline comments by at least two spaces. - - An inline comment is a comment on the same line as a statement. Inline - comments should be separated by at least two spaces from the statement. - They should start with a # and a single space. - - Each line of a block comment starts with a # and a single space - (unless it is indented text inside the comment). - - Okay: x = x + 1 # Increment x - Okay: x = x + 1 # Increment x - Okay: # Block comment - E261: x = x + 1 # Increment x - E262: x = x + 1 #Increment x - E262: x = x + 1 # Increment x - E265: #Block comment - E266: ### Block comment - """ - prev_end = (0, 0) - for token_type, text, start, end, line in tokens: - if token_type == tokenize.COMMENT: - inline_comment = line[:start[1]].strip() - if inline_comment: - if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: - yield (prev_end, - "E261 at least two spaces before inline comment") - symbol, sp, comment = text.partition(' ') - bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#') - if inline_comment: - if bad_prefix or comment[:1] in WHITESPACE: - yield start, "E262 inline comment should start with '# '" - elif bad_prefix and (bad_prefix != '!' or start[0] > 1): - if bad_prefix != '#': - yield start, "E265 block comment should start with '# '" - elif comment: - yield start, "E266 too many leading '#' for block comment" - elif token_type != tokenize.NL: - prev_end = end - - -def imports_on_separate_lines(logical_line): - r"""Place imports on separate lines. - - Okay: import os\nimport sys - E401: import sys, os - - Okay: from subprocess import Popen, PIPE - Okay: from myclas import MyClass - Okay: from foo.bar.yourclass import YourClass - Okay: import myclass - Okay: import foo.bar.yourclass - """ - line = logical_line - if line.startswith('import '): - found = line.find(',') - if -1 < found and ';' not in line[:found]: - yield found, "E401 multiple imports on one line" - - -def module_imports_on_top_of_file( - logical_line, indent_level, checker_state, noqa): - r"""Place imports at the top of the file. - - Always put imports at the top of the file, just after any module comments - and docstrings, and before module globals and constants. 
- - Okay: import os - Okay: # this is a comment\nimport os - Okay: '''this is a module docstring'''\nimport os - Okay: r'''this is a module docstring'''\nimport os - Okay: - try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y - Okay: - try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y - E402: a=1\nimport os - E402: 'One string'\n"Two string"\nimport os - E402: a=1\nfrom sys import x - - Okay: if x:\n import os - """ - def is_string_literal(line): - if line[0] in 'uUbB': - line = line[1:] - if line and line[0] in 'rR': - line = line[1:] - return line and (line[0] == '"' or line[0] == "'") - - allowed_try_keywords = ('try', 'except', 'else', 'finally') - - if indent_level: # Allow imports in conditional statements or functions - return - if not logical_line: # Allow empty lines or comments - return - if noqa: - return - line = logical_line - if line.startswith('import ') or line.startswith('from '): - if checker_state.get('seen_non_imports', False): - yield 0, "E402 module level import not at top of file" - elif re.match(DUNDER_REGEX, line): - return - elif any(line.startswith(kw) for kw in allowed_try_keywords): - # Allow try, except, else, finally keywords intermixed with imports in - # order to support conditional importing - return - elif is_string_literal(line): - # The first literal is a docstring, allow it. Otherwise, report error. - if checker_state.get('seen_docstring', False): - checker_state['seen_non_imports'] = True - else: - checker_state['seen_docstring'] = True - else: - checker_state['seen_non_imports'] = True - - -def compound_statements(logical_line): - r"""Compound statements (on the same line) are generally discouraged. - - While sometimes it's okay to put an if/for/while with a small body - on the same line, never do this for multi-clause statements. - Also avoid folding such long lines! - - Always use a def statement instead of an assignment statement that - binds a lambda expression directly to a name. 
- - Okay: if foo == 'blah':\n do_blah_thing() - Okay: do_one() - Okay: do_two() - Okay: do_three() - - E701: if foo == 'blah': do_blah_thing() - E701: for x in lst: total += x - E701: while t < 10: t = delay() - E701: if foo == 'blah': do_blah_thing() - E701: else: do_non_blah_thing() - E701: try: something() - E701: finally: cleanup() - E701: if foo == 'blah': one(); two(); three() - E702: do_one(); do_two(); do_three() - E703: do_four(); # useless semicolon - E704: def f(x): return 2*x - E731: f = lambda x: 2*x - """ - line = logical_line - last_char = len(line) - 1 - found = line.find(':') - prev_found = 0 - counts = dict((char, 0) for char in '{}[]()') - while -1 < found < last_char: - update_counts(line[prev_found:found], counts) - if ((counts['{'] <= counts['}'] and # {'a': 1} (dict) - counts['['] <= counts[']'] and # [1:2] (slice) - counts['('] <= counts[')'])): # (annotation) - lambda_kw = LAMBDA_REGEX.search(line, 0, found) - if lambda_kw: - before = line[:lambda_kw.start()].rstrip() - if before[-1:] == '=' and isidentifier(before[:-1].strip()): - yield 0, ("E731 do not assign a lambda expression, use a " - "def") - break - if STARTSWITH_DEF_REGEX.match(line): - yield 0, "E704 multiple statements on one line (def)" - elif STARTSWITH_INDENT_STATEMENT_REGEX.match(line): - yield found, "E701 multiple statements on one line (colon)" - prev_found = found - found = line.find(':', found + 1) - found = line.find(';') - while -1 < found: - if found < last_char: - yield found, "E702 multiple statements on one line (semicolon)" - else: - yield found, "E703 statement ends with a semicolon" - found = line.find(';', found + 1) - - -def explicit_line_join(logical_line, tokens): - r"""Avoid explicit line join between brackets. - - The preferred way of wrapping long lines is by using Python's implied line - continuation inside parentheses, brackets and braces. Long lines can be - broken over multiple lines by wrapping expressions in parentheses. These - should be used in preference to using a backslash for line continuation. - - E502: aaa = [123, \\n 123] - E502: aaa = ("bbb " \\n "ccc") - - Okay: aaa = [123,\n 123] - Okay: aaa = ("bbb "\n "ccc") - Okay: aaa = "bbb " \\n "ccc" - Okay: aaa = 123 # \\ - """ - prev_start = prev_end = parens = 0 - comment = False - backslash = None - for token_type, text, start, end, line in tokens: - if token_type == tokenize.COMMENT: - comment = True - if start[0] != prev_start and parens and backslash and not comment: - yield backslash, "E502 the backslash is redundant between brackets" - if end[0] != prev_end: - if line.rstrip('\r\n').endswith('\\'): - backslash = (end[0], len(line.splitlines()[-1]) - 1) - else: - backslash = None - prev_start = prev_end = end[0] - else: - prev_start = start[0] - if token_type == tokenize.OP: - if text in '([{': - parens += 1 - elif text in ')]}': - parens -= 1 - - -def break_around_binary_operator(logical_line, tokens): - r""" - Avoid breaks before binary operators. - - The preferred place to break around a binary operator is after the - operator, not before it. 
- - W503: (width == 0\n + height == 0) - W503: (width == 0\n and height == 0) - - Okay: (width == 0 +\n height == 0) - Okay: foo(\n -x) - Okay: foo(x\n []) - Okay: x = '''\n''' + '' - Okay: foo(x,\n -y) - Okay: foo(x, # comment\n -y) - Okay: var = (1 &\n ~2) - Okay: var = (1 /\n -2) - Okay: var = (1 +\n -1 +\n -2) - """ - def is_binary_operator(token_type, text): - # The % character is strictly speaking a binary operator, but the - # common usage seems to be to put it next to the format parameters, - # after a line break. - return ((token_type == tokenize.OP or text in ['and', 'or']) and - text not in "()[]{},:.;@=%~") - - line_break = False - unary_context = True - # Previous non-newline token types and text - previous_token_type = None - previous_text = None - for token_type, text, start, end, line in tokens: - if token_type == tokenize.COMMENT: - continue - if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: - line_break = True - else: - if (is_binary_operator(token_type, text) and line_break and - not unary_context and - not is_binary_operator(previous_token_type, - previous_text)): - yield start, "W503 line break before binary operator" - unary_context = text in '([{,;' - line_break = False - previous_token_type = token_type - previous_text = text - - -def comparison_to_singleton(logical_line, noqa): - r"""Comparison to singletons should use "is" or "is not". - - Comparisons to singletons like None should always be done - with "is" or "is not", never the equality operators. - - Okay: if arg is not None: - E711: if arg != None: - E711: if None == arg: - E712: if arg == True: - E712: if False == arg: - - Also, beware of writing if x when you really mean if x is not None -- - e.g. when testing whether a variable or argument that defaults to None was - set to some other value. The other value might have a type (such as a - container) that could be false in a boolean context! - """ - match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line) - if match: - singleton = match.group(1) or match.group(3) - same = (match.group(2) == '==') - - msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) - if singleton in ('None',): - code = 'E711' - else: - code = 'E712' - nonzero = ((singleton == 'True' and same) or - (singleton == 'False' and not same)) - msg += " or 'if %scond:'" % ('' if nonzero else 'not ') - yield match.start(2), ("%s comparison to %s should be %s" % - (code, singleton, msg)) - - -def comparison_negative(logical_line): - r"""Negative comparison should be done using "not in" and "is not". - - Okay: if x not in y:\n pass - Okay: assert (X in Y or X is Z) - Okay: if not (X in Y):\n pass - Okay: zz = x is not y - E713: Z = not X in Y - E713: if not X.B in Y:\n pass - E714: if not X is Y:\n pass - E714: Z = not X.B is Y - """ - match = COMPARE_NEGATIVE_REGEX.search(logical_line) - if match: - pos = match.start(1) - if match.group(2) == 'in': - yield pos, "E713 test for membership should be 'not in'" - else: - yield pos, "E714 test for object identity should be 'is not'" - - -def comparison_type(logical_line, noqa): - r"""Object type comparisons should always use isinstance(). - - Do not compare types directly. - - Okay: if isinstance(obj, int): - E721: if type(obj) is type(1): - - When checking if an object is a string, keep in mind that it might be a - unicode string too! 
unicode string too! In Python 2.3, str and unicode have a common base - class, basestring, so you can do: - - Okay: if isinstance(obj, basestring): - Okay: if type(a1) is type(b1): - """ - match = COMPARE_TYPE_REGEX.search(logical_line) - if match and not noqa: - inst = match.group(1) - if inst and isidentifier(inst) and inst not in SINGLETONS: - return # Allow comparison for types which are not obvious - yield match.start(), "E721 do not compare types, use 'isinstance()'" - - -def bare_except(logical_line, noqa): - r"""When catching exceptions, mention specific exceptions whenever possible. - - Okay: except Exception: - Okay: except BaseException: - E722: except: - """ - if noqa: - return - - regex = re.compile(r"except\s*:") - match = regex.match(logical_line) - if match: - yield match.start(), "E722 do not use bare except" - - -def ambiguous_identifier(logical_line, tokens): - r"""Never use the characters 'l', 'O', or 'I' as variable names. - - In some fonts, these characters are indistinguishable from the numerals - one and zero. When tempted to use 'l', use 'L' instead. - - Okay: L = 0 - Okay: o = 123 - Okay: i = 42 - E741: l = 0 - E741: O = 123 - E741: I = 42 - - Variables can be bound in several other contexts, including class and - function definitions, 'global' and 'nonlocal' statements, exception - handlers, and 'with' statements. - - Okay: except AttributeError as o: - Okay: with lock as L: - E741: except AttributeError as O: - E741: with lock as l: - E741: global I - E741: nonlocal l - E742: class I(object): - E743: def l(x): - """ - idents_to_avoid = ('l', 'O', 'I') - prev_type, prev_text, prev_start, prev_end, __ = tokens[0] - for token_type, text, start, end, line in tokens[1:]: - ident = pos = None - # identifiers on the lhs of an assignment operator - if token_type == tokenize.OP and '=' in text: - if prev_text in idents_to_avoid: - ident = prev_text - pos = prev_start - # identifiers bound to a value with 'as', 'global', or 'nonlocal' - if prev_text in ('as', 'global', 'nonlocal'): - if text in idents_to_avoid: - ident = text - pos = start - if prev_text == 'class': - if text in idents_to_avoid: - yield start, "E742 ambiguous class definition '%s'" % text - if prev_text == 'def': - if text in idents_to_avoid: - yield start, "E743 ambiguous function definition '%s'" % text - if ident: - yield pos, "E741 ambiguous variable name '%s'" % ident - prev_text = text - prev_start = start - - -def python_3000_has_key(logical_line, noqa): - r"""The {}.has_key() method is removed in Python 3: use the 'in' operator. - - Okay: if "alph" in d:\n print d["alph"] - W601: assert d.has_key('alph') - """ - pos = logical_line.find('.has_key(') - if pos > -1 and not noqa: - yield pos, "W601 .has_key() is deprecated, use 'in'" - - -def python_3000_raise_comma(logical_line): - r"""When raising an exception, use "raise ValueError('message')". - - The older form is removed in Python 3. - - Okay: raise DummyError("Message") - W602: raise DummyError, "Message" - """ - match = RAISE_COMMA_REGEX.match(logical_line) - if match and not RERAISE_COMMA_REGEX.match(logical_line): - yield match.end() - 1, "W602 deprecated form of raising exception" - - -def python_3000_not_equal(logical_line): - r"""New code should always use != instead of <>. - - The older syntax is removed in Python 3.
- - Okay: if a != 'no': - W603: if a <> 'no': - """ - pos = logical_line.find('<>') - if pos > -1: - yield pos, "W603 '<>' is deprecated, use '!='" - - -def python_3000_backticks(logical_line): - r"""Use repr() instead of backticks in Python 3. - - Okay: val = repr(1 + 2) - W604: val = `1 + 2` - """ - pos = logical_line.find('`') - if pos > -1: - yield pos, "W604 backticks are deprecated, use 'repr()'" - - -############################################################################## -# Helper functions -############################################################################## - - -if sys.version_info < (3,): - # Python 2: implicit encoding. - def readlines(filename): - """Read the source code.""" - with open(filename, 'rU') as f: - return f.readlines() - isidentifier = re.compile(r'[a-zA-Z_]\w*$').match - stdin_get_value = sys.stdin.read -else: - # Python 3 - def readlines(filename): - """Read the source code.""" - try: - with open(filename, 'rb') as f: - (coding, lines) = tokenize.detect_encoding(f.readline) - f = TextIOWrapper(f, coding, line_buffering=True) - return [line.decode(coding) for line in lines] + f.readlines() - except (LookupError, SyntaxError, UnicodeError): - # Fall back if file encoding is improperly declared - with open(filename, encoding='latin-1') as f: - return f.readlines() - isidentifier = str.isidentifier - - stdin_get_value = sys.stdin.read - -noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search - - -def expand_indent(line): - r"""Return the amount of indentation. - - Tabs are expanded to the next multiple of 8. - - >>> expand_indent(' ') - 4 - >>> expand_indent('\t') - 8 - >>> expand_indent(' \t') - 8 - >>> expand_indent(' \t') - 16 - """ - if '\t' not in line: - return len(line) - len(line.lstrip()) - result = 0 - for char in line: - if char == '\t': - result = result // 8 * 8 + 8 - elif char == ' ': - result += 1 - else: - break - return result - - -def mute_string(text): - """Replace contents with 'xxx' to prevent syntax matching. - - >>> mute_string('"abc"') - '"xxx"' - >>> mute_string("'''abc'''") - "'''xxx'''" - >>> mute_string("r'abc'") - "r'xxx'" - """ - # String modifiers (e.g. u or r) - start = text.index(text[-1]) + 1 - end = len(text) - 1 - # Triple quotes - if text[-3:] in ('"""', "'''"): - start += 2 - end -= 2 - return text[:start] + 'x' * (end - start) + text[end:] - - -def parse_udiff(diff, patterns=None, parent='.'): - """Return a dictionary of matching lines.""" - # For each file of the diff, the entry key is the filename, - # and the value is a set of row numbers to consider. - rv = {} - path = nrows = None - for line in diff.splitlines(): - if nrows: - if line[:1] != '-': - nrows -= 1 - continue - if line[:3] == '@@ ': - hunk_match = HUNK_REGEX.match(line) - (row, nrows) = [int(g or '1') for g in hunk_match.groups()] - rv[path].update(range(row, row + nrows)) - elif line[:3] == '+++': - path = line[4:].split('\t', 1)[0] - if path[:2] == 'b/': - path = path[2:] - rv[path] = set() - return dict([(os.path.join(parent, path), rows) - for (path, rows) in rv.items() - if rows and filename_match(path, patterns)]) - - -def normalize_paths(value, parent=os.curdir): - """Parse a comma-separated list of paths. - - Return a list of absolute paths. 
- """ - if not value: - return [] - if isinstance(value, list): - return value - paths = [] - for path in value.split(','): - path = path.strip() - if '/' in path: - path = os.path.abspath(os.path.join(parent, path)) - paths.append(path.rstrip('/')) - return paths - - -def filename_match(filename, patterns, default=True): - """Check if patterns contains a pattern that matches filename. - - If patterns is unspecified, this always returns True. - """ - if not patterns: - return default - return any(fnmatch(filename, pattern) for pattern in patterns) - - -def update_counts(s, counts): - r"""Adds one to the counts of each appearance of characters in s, - for characters in counts""" - for char in s: - if char in counts: - counts[char] += 1 - - -def _is_eol_token(token): - return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' - - -if COMMENT_WITH_NL: - def _is_eol_token(token, _eol_token=_is_eol_token): - return _eol_token(token) or (token[0] == tokenize.COMMENT and - token[1] == token[4]) - -############################################################################## -# Framework to run all checks -############################################################################## - - -_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} - - -def _get_parameters(function): - if sys.version_info >= (3, 3): - return [parameter.name - for parameter - in inspect.signature(function).parameters.values() - if parameter.kind == parameter.POSITIONAL_OR_KEYWORD] - else: - return inspect.getargspec(function)[0] - - -def register_check(check, codes=None): - """Register a new check object.""" - def _add_check(check, kind, codes, args): - if check in _checks[kind]: - _checks[kind][check][0].extend(codes or []) - else: - _checks[kind][check] = (codes or [''], args) - if inspect.isfunction(check): - args = _get_parameters(check) - if args and args[0] in ('physical_line', 'logical_line'): - if codes is None: - codes = ERRORCODE_REGEX.findall(check.__doc__ or '') - _add_check(check, args[0], codes, args) - elif inspect.isclass(check): - if _get_parameters(check.__init__)[:2] == ['self', 'tree']: - _add_check(check, 'tree', codes, None) - - -def init_checks_registry(): - """Register all globally visible functions. - - The first argument name is either 'physical_line' or 'logical_line'. - """ - mod = inspect.getmodule(register_check) - for (name, function) in inspect.getmembers(mod, inspect.isfunction): - register_check(function) - - -init_checks_registry() - - -class Checker(object): - """Load a Python source file, tokenize it, check coding style.""" - - def __init__(self, filename=None, lines=None, - options=None, report=None, **kwargs): - if options is None: - options = StyleGuide(kwargs).options - else: - assert not kwargs - self._io_error = None - self._physical_checks = options.physical_checks - self._logical_checks = options.logical_checks - self._ast_checks = options.ast_checks - self.max_line_length = options.max_line_length - self.multiline = False # in a multiline string? - self.hang_closing = options.hang_closing - self.verbose = options.verbose - self.filename = filename - # Dictionary where a checker can store its custom state. 
- self._checker_states = {} - if filename is None: - self.filename = 'stdin' - self.lines = lines or [] - elif filename == '-': - self.filename = 'stdin' - self.lines = stdin_get_value().splitlines(True) - elif lines is None: - try: - self.lines = readlines(filename) - except IOError: - (exc_type, exc) = sys.exc_info()[:2] - self._io_error = '%s: %s' % (exc_type.__name__, exc) - self.lines = [] - else: - self.lines = lines - if self.lines: - ord0 = ord(self.lines[0][0]) - if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM - if ord0 == 0xfeff: - self.lines[0] = self.lines[0][1:] - elif self.lines[0][:3] == '\xef\xbb\xbf': - self.lines[0] = self.lines[0][3:] - self.report = report or options.report - self.report_error = self.report.error - self.noqa = False - - def report_invalid_syntax(self): - """Check if the syntax is valid.""" - (exc_type, exc) = sys.exc_info()[:2] - if len(exc.args) > 1: - offset = exc.args[1] - if len(offset) > 2: - offset = offset[1:3] - else: - offset = (1, 0) - self.report_error(offset[0], offset[1] or 0, - 'E901 %s: %s' % (exc_type.__name__, exc.args[0]), - self.report_invalid_syntax) - - def readline(self): - """Get the next line from the input buffer.""" - if self.line_number >= self.total_lines: - return '' - line = self.lines[self.line_number] - self.line_number += 1 - if self.indent_char is None and line[:1] in WHITESPACE: - self.indent_char = line[0] - return line - - def run_check(self, check, argument_names): - """Run a check plugin.""" - arguments = [] - for name in argument_names: - arguments.append(getattr(self, name)) - return check(*arguments) - - def init_checker_state(self, name, argument_names): - """Prepare custom state for the specific checker plugin.""" - if 'checker_state' in argument_names: - self.checker_state = self._checker_states.setdefault(name, {}) - - def check_physical(self, line): - """Run all physical checks on a raw input line.""" - self.physical_line = line - for name, check, argument_names in self._physical_checks: - self.init_checker_state(name, argument_names) - result = self.run_check(check, argument_names) - if result is not None: - (offset, text) = result - self.report_error(self.line_number, offset, text, check) - if text[:4] == 'E101': - self.indent_char = line[0] - - def build_tokens_line(self): - """Build a logical line from tokens.""" - logical = [] - comments = [] - length = 0 - prev_row = prev_col = mapping = None - for token_type, text, start, end, line in self.tokens: - if token_type in SKIP_TOKENS: - continue - if not mapping: - mapping = [(0, start)] - if token_type == tokenize.COMMENT: - comments.append(text) - continue - if token_type == tokenize.STRING: - text = mute_string(text) - if prev_row: - (start_row, start_col) = start - if prev_row != start_row: # different row - prev_text = self.lines[prev_row - 1][prev_col - 1] - if prev_text == ',' or (prev_text not in '{[(' and - text not in '}])'): - text = ' ' + text - elif prev_col != start_col: # different column - text = line[prev_col:start_col] + text - logical.append(text) - length += len(text) - mapping.append((length, end)) - (prev_row, prev_col) = end - self.logical_line = ''.join(logical) - self.noqa = comments and noqa(''.join(comments)) - return mapping - - def check_logical(self): - """Build a line from tokens and run all logical checks on it.""" - self.report.increment_logical_line() - mapping = self.build_tokens_line() - - if not mapping: - return - - (start_row, start_col) = mapping[0][1] - start_line = self.lines[start_row - 1] - self.indent_level = 
expand_indent(start_line[:start_col]) - if self.blank_before < self.blank_lines: - self.blank_before = self.blank_lines - if self.verbose >= 2: - print(self.logical_line[:80].rstrip()) - for name, check, argument_names in self._logical_checks: - if self.verbose >= 4: - print(' ' + name) - self.init_checker_state(name, argument_names) - for offset, text in self.run_check(check, argument_names) or (): - if not isinstance(offset, tuple): - for token_offset, pos in mapping: - if offset <= token_offset: - break - offset = (pos[0], pos[1] + offset - token_offset) - self.report_error(offset[0], offset[1], text, check) - if self.logical_line: - self.previous_indent_level = self.indent_level - self.previous_logical = self.logical_line - if not self.indent_level: - self.previous_unindented_logical_line = self.logical_line - self.blank_lines = 0 - self.tokens = [] - - def check_ast(self): - """Build the file's AST and run all AST checks.""" - try: - tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) - except (ValueError, SyntaxError, TypeError): - return self.report_invalid_syntax() - for name, cls, __ in self._ast_checks: - checker = cls(tree, self.filename) - for lineno, offset, text, check in checker.run(): - if not self.lines or not noqa(self.lines[lineno - 1]): - self.report_error(lineno, offset, text, check) - - def generate_tokens(self): - """Tokenize the file, run physical line checks and yield tokens.""" - if self._io_error: - self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) - tokengen = tokenize.generate_tokens(self.readline) - try: - for token in tokengen: - if token[2][0] > self.total_lines: - return - self.noqa = token[4] and noqa(token[4]) - self.maybe_check_physical(token) - yield token - except (SyntaxError, tokenize.TokenError): - self.report_invalid_syntax() - - def maybe_check_physical(self, token): - """If appropriate (based on token), check current physical line(s).""" - # Called after every token, but act only on end of line. - if _is_eol_token(token): - # Obviously, a newline token ends a single physical line. - self.check_physical(token[4]) - elif token[0] == tokenize.STRING and '\n' in token[1]: - # Less obviously, a string that contains newlines is a - # multiline string, either triple-quoted or with internal - # newlines backslash-escaped. Check every physical line in the - # string *except* for the last one: its newline is outside of - # the multiline string, so we consider it a regular physical - # line, and will check it like any other physical line. 
- # - # Subtleties: - # - we don't *completely* ignore the last line; if it contains - # the magical "# noqa" comment, we disable all physical - # checks for the entire multiline string - # - have to wind self.line_number back because initially it - # points to the last line of the string, and we want - # check_physical() to give accurate feedback - if noqa(token[4]): - return - self.multiline = True - self.line_number = token[2][0] - for line in token[1].split('\n')[:-1]: - self.check_physical(line + '\n') - self.line_number += 1 - self.multiline = False - - def check_all(self, expected=None, line_offset=0): - """Run all checks on the input file.""" - self.report.init_file(self.filename, self.lines, expected, line_offset) - self.total_lines = len(self.lines) - if self._ast_checks: - self.check_ast() - self.line_number = 0 - self.indent_char = None - self.indent_level = self.previous_indent_level = 0 - self.previous_logical = '' - self.previous_unindented_logical_line = '' - self.tokens = [] - self.blank_lines = self.blank_before = 0 - parens = 0 - for token in self.generate_tokens(): - self.tokens.append(token) - token_type, text = token[0:2] - if self.verbose >= 3: - if token[2][0] == token[3][0]: - pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) - else: - pos = 'l.%s' % token[3][0] - print('l.%s\t%s\t%s\t%r' % - (token[2][0], pos, tokenize.tok_name[token[0]], text)) - if token_type == tokenize.OP: - if text in '([{': - parens += 1 - elif text in '}])': - parens -= 1 - elif not parens: - if token_type in NEWLINE: - if token_type == tokenize.NEWLINE: - self.check_logical() - self.blank_before = 0 - elif len(self.tokens) == 1: - # The physical line contains only this token. - self.blank_lines += 1 - del self.tokens[0] - else: - self.check_logical() - elif COMMENT_WITH_NL and token_type == tokenize.COMMENT: - if len(self.tokens) == 1: - # The comment also ends a physical line - token = list(token) - token[1] = text.rstrip('\r\n') - token[3] = (token[2][0], token[2][1] + len(token[1])) - self.tokens = [tuple(token)] - self.check_logical() - if self.tokens: - self.check_physical(self.lines[-1]) - self.check_logical() - return self.report.get_file_results() - - -class BaseReport(object): - """Collect the results of the checks.""" - - print_filename = False - - def __init__(self, options): - self._benchmark_keys = options.benchmark_keys - self._ignore_code = options.ignore_code - # Results - self.elapsed = 0 - self.total_errors = 0 - self.counters = dict.fromkeys(self._benchmark_keys, 0) - self.messages = {} - - def start(self): - """Start the timer.""" - self._start_time = time.time() - - def stop(self): - """Stop the timer.""" - self.elapsed = time.time() - self._start_time - - def init_file(self, filename, lines, expected, line_offset): - """Signal a new file.""" - self.filename = filename - self.lines = lines - self.expected = expected or () - self.line_offset = line_offset - self.file_errors = 0 - self.counters['files'] += 1 - self.counters['physical lines'] += len(lines) - - def increment_logical_line(self): - """Signal a new logical line.""" - self.counters['logical lines'] += 1 - - def error(self, line_number, offset, text, check): - """Report an error, according to options.""" - code = text[:4] - if self._ignore_code(code): - return - if code in self.counters: - self.counters[code] += 1 - else: - self.counters[code] = 1 - self.messages[code] = text[5:] - # Don't care about expected errors or warnings - if code in self.expected: - return - if self.print_filename and not 
self.file_errors: - print(self.filename) - self.file_errors += 1 - self.total_errors += 1 - return code - - def get_file_results(self): - """Return the count of errors and warnings for this file.""" - return self.file_errors - - def get_count(self, prefix=''): - """Return the total count of errors and warnings.""" - return sum([self.counters[key] - for key in self.messages if key.startswith(prefix)]) - - def get_statistics(self, prefix=''): - """Get statistics for message codes that start with the prefix. - - prefix='' matches all errors and warnings - prefix='E' matches all errors - prefix='W' matches all warnings - prefix='E4' matches all errors that have to do with imports - """ - return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) - for key in sorted(self.messages) if key.startswith(prefix)] - - def print_statistics(self, prefix=''): - """Print overall statistics (number of errors and warnings).""" - for line in self.get_statistics(prefix): - print(line) - - def print_benchmark(self): - """Print benchmark numbers.""" - print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) - if self.elapsed: - for key in self._benchmark_keys: - print('%-7d %s per second (%d total)' % - (self.counters[key] / self.elapsed, key, - self.counters[key])) - - -class FileReport(BaseReport): - """Collect the results of the checks and print only the filenames.""" - - print_filename = True - - -class StandardReport(BaseReport): - """Collect and print the results of the checks.""" - - def __init__(self, options): - super(StandardReport, self).__init__(options) - self._fmt = REPORT_FORMAT.get(options.format.lower(), - options.format) - self._repeat = options.repeat - self._show_source = options.show_source - self._show_pep8 = options.show_pep8 - - def init_file(self, filename, lines, expected, line_offset): - """Signal a new file.""" - self._deferred_print = [] - return super(StandardReport, self).init_file( - filename, lines, expected, line_offset) - - def error(self, line_number, offset, text, check): - """Report an error, according to options.""" - code = super(StandardReport, self).error(line_number, offset, - text, check) - if code and (self.counters[code] == 1 or self._repeat): - self._deferred_print.append( - (line_number, offset, code, text[5:], check.__doc__)) - return code - - def get_file_results(self): - """Print the result and return the overall count for this file.""" - self._deferred_print.sort() - for line_number, offset, code, text, doc in self._deferred_print: - print(self._fmt % { - 'path': self.filename, - 'row': self.line_offset + line_number, 'col': offset + 1, - 'code': code, 'text': text, - }) - if self._show_source: - if line_number > len(self.lines): - line = '' - else: - line = self.lines[line_number - 1] - print(line.rstrip()) - print(re.sub(r'\S', ' ', line[:offset]) + '^') - if self._show_pep8 and doc: - print(' ' + doc.strip()) - - # stdout is block buffered when not stdout.isatty(). - # line can be broken where buffer boundary since other processes - # write to same file. - # flush() after print() to avoid buffer boundary. - # Typical buffer size is 8192. line written safely when - # len(line) < 8192. 
- sys.stdout.flush() - return self.file_errors - - -class DiffReport(StandardReport): - """Collect and print the results for the changed lines only.""" - - def __init__(self, options): - super(DiffReport, self).__init__(options) - self._selected = options.selected_lines - - def error(self, line_number, offset, text, check): - if line_number not in self._selected[self.filename]: - return - return super(DiffReport, self).error(line_number, offset, text, check) - - -class StyleGuide(object): - """Initialize a PEP-8 instance with few options.""" - - def __init__(self, *args, **kwargs): - # build options from the command line - self.checker_class = kwargs.pop('checker_class', Checker) - parse_argv = kwargs.pop('parse_argv', False) - config_file = kwargs.pop('config_file', False) - parser = kwargs.pop('parser', None) - # build options from dict - options_dict = dict(*args, **kwargs) - arglist = None if parse_argv else options_dict.get('paths', None) - options, self.paths = process_options( - arglist, parse_argv, config_file, parser) - if options_dict: - options.__dict__.update(options_dict) - if 'paths' in options_dict: - self.paths = options_dict['paths'] - - self.runner = self.input_file - self.options = options - - if not options.reporter: - options.reporter = BaseReport if options.quiet else StandardReport - - options.select = tuple(options.select or ()) - if not (options.select or options.ignore or - options.testsuite or options.doctest) and DEFAULT_IGNORE: - # The default choice: ignore controversial checks - options.ignore = tuple(DEFAULT_IGNORE.split(',')) - else: - # Ignore all checks which are not explicitly selected - options.ignore = ('',) if options.select else tuple(options.ignore) - options.benchmark_keys = BENCHMARK_KEYS[:] - options.ignore_code = self.ignore_code - options.physical_checks = self.get_checks('physical_line') - options.logical_checks = self.get_checks('logical_line') - options.ast_checks = self.get_checks('tree') - self.init_report() - - def init_report(self, reporter=None): - """Initialize the report instance.""" - self.options.report = (reporter or self.options.reporter)(self.options) - return self.options.report - - def check_files(self, paths=None): - """Run all checks on the paths.""" - if paths is None: - paths = self.paths - report = self.options.report - runner = self.runner - report.start() - try: - for path in paths: - if os.path.isdir(path): - self.input_dir(path) - elif not self.excluded(path): - runner(path) - except KeyboardInterrupt: - print('... stopped') - report.stop() - return report - - def input_file(self, filename, lines=None, expected=None, line_offset=0): - """Run all checks on a Python source file.""" - if self.options.verbose: - print('checking %s' % filename) - fchecker = self.checker_class( - filename, lines=lines, options=self.options) - return fchecker.check_all(expected=expected, line_offset=line_offset) - - def input_dir(self, dirname): - """Check all files in this directory and all subdirectories.""" - dirname = dirname.rstrip('/') - if self.excluded(dirname): - return 0 - counters = self.options.report.counters - verbose = self.options.verbose - filepatterns = self.options.filename - runner = self.runner - for root, dirs, files in os.walk(dirname): - if verbose: - print('directory ' + root) - counters['directories'] += 1 - for subdir in sorted(dirs): - if self.excluded(subdir, root): - dirs.remove(subdir) - for filename in sorted(files): - # contain a pattern that matches? 
- if ((filename_match(filename, filepatterns) and - not self.excluded(filename, root))): - runner(os.path.join(root, filename)) - - def excluded(self, filename, parent=None): - """Check if the file should be excluded. - - Check if 'options.exclude' contains a pattern that matches filename. - """ - if not self.options.exclude: - return False - basename = os.path.basename(filename) - if filename_match(basename, self.options.exclude): - return True - if parent: - filename = os.path.join(parent, filename) - filename = os.path.abspath(filename) - return filename_match(filename, self.options.exclude) - - def ignore_code(self, code): - """Check if the error code should be ignored. - - If 'options.select' contains a prefix of the error code, - return False. Else, if 'options.ignore' contains a prefix of - the error code, return True. - """ - if len(code) < 4 and any(s.startswith(code) - for s in self.options.select): - return False - return (code.startswith(self.options.ignore) and - not code.startswith(self.options.select)) - - def get_checks(self, argument_name): - """Get all the checks for this category. - - Find all globally visible functions where the first argument name - starts with argument_name and which contain selected tests. - """ - checks = [] - for check, attrs in _checks[argument_name].items(): - (codes, args) = attrs - if any(not (code and self.ignore_code(code)) for code in codes): - checks.append((check.__name__, check, args)) - return sorted(checks) - - -def get_parser(prog='pycodestyle', version=__version__): - """Create the parser for the program.""" - parser = OptionParser(prog=prog, version=version, - usage="%prog [options] input ...") - parser.config_options = [ - 'exclude', 'filename', 'select', 'ignore', 'max-line-length', - 'hang-closing', 'count', 'format', 'quiet', 'show-pep8', - 'show-source', 'statistics', 'verbose'] - parser.add_option('-v', '--verbose', default=0, action='count', - help="print status messages, or debug with -vv") - parser.add_option('-q', '--quiet', default=0, action='count', - help="report only file names, or nothing with -qq") - parser.add_option('-r', '--repeat', default=True, action='store_true', - help="(obsolete) show all occurrences of the same error") - parser.add_option('--first', action='store_false', dest='repeat', - help="show first occurrence of each error") - parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, - help="exclude files or directories which match these " - "comma separated patterns (default: %default)") - parser.add_option('--filename', metavar='patterns', default='*.py', - help="when parsing directories, only check filenames " - "matching these comma separated patterns " - "(default: %default)") - parser.add_option('--select', metavar='errors', default='', - help="select errors and warnings (e.g. E,W6)") - parser.add_option('--ignore', metavar='errors', default='', - help="skip errors and warnings (e.g. 
E4,W) " - "(default: %s)" % DEFAULT_IGNORE) - parser.add_option('--show-source', action='store_true', - help="show source code for each error") - parser.add_option('--show-pep8', action='store_true', - help="show text of PEP 8 for each error " - "(implies --first)") - parser.add_option('--statistics', action='store_true', - help="count errors and warnings") - parser.add_option('--count', action='store_true', - help="print total number of errors and warnings " - "to standard error and set exit code to 1 if " - "total is not null") - parser.add_option('--max-line-length', type='int', metavar='n', - default=MAX_LINE_LENGTH, - help="set maximum allowed line length " - "(default: %default)") - parser.add_option('--hang-closing', action='store_true', - help="hang closing bracket instead of matching " - "indentation of opening bracket's line") - parser.add_option('--format', metavar='format', default='default', - help="set the error format [default|pylint|]") - parser.add_option('--diff', action='store_true', - help="report changes only within line number ranges in " - "the unified diff received on STDIN") - group = parser.add_option_group("Testing Options") - if os.path.exists(TESTSUITE_PATH): - group.add_option('--testsuite', metavar='dir', - help="run regression tests from dir") - group.add_option('--doctest', action='store_true', - help="run doctest on myself") - group.add_option('--benchmark', action='store_true', - help="measure processing speed") - return parser - - -def read_config(options, args, arglist, parser): - """Read and parse configurations. - - If a config file is specified on the command line with the "--config" - option, then only it is used for configuration. - - Otherwise, the user configuration (~/.config/pycodestyle) and any local - configurations in the current directory or above will be merged together - (in that order) using the read method of ConfigParser. - """ - config = RawConfigParser() - - cli_conf = options.config - - local_dir = os.curdir - - if USER_CONFIG and os.path.isfile(USER_CONFIG): - if options.verbose: - print('user configuration: %s' % USER_CONFIG) - config.read(USER_CONFIG) - - parent = tail = args and os.path.abspath(os.path.commonprefix(args)) - while tail: - if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG): - local_dir = parent - if options.verbose: - print('local configuration: in %s' % parent) - break - (parent, tail) = os.path.split(parent) - - if cli_conf and os.path.isfile(cli_conf): - if options.verbose: - print('cli configuration: %s' % cli_conf) - config.read(cli_conf) - - pycodestyle_section = None - if config.has_section(parser.prog): - pycodestyle_section = parser.prog - elif config.has_section('pep8'): - pycodestyle_section = 'pep8' # Deprecated - warnings.warn('[pep8] section is deprecated. 
Use [pycodestyle].') - - if pycodestyle_section: - option_list = dict([(o.dest, o.type or o.action) - for o in parser.option_list]) - - # First, read the default values - (new_options, __) = parser.parse_args([]) - - # Second, parse the configuration - for opt in config.options(pycodestyle_section): - if opt.replace('_', '-') not in parser.config_options: - print(" unknown option '%s' ignored" % opt) - continue - if options.verbose > 1: - print(" %s = %s" % (opt, - config.get(pycodestyle_section, opt))) - normalized_opt = opt.replace('-', '_') - opt_type = option_list[normalized_opt] - if opt_type in ('int', 'count'): - value = config.getint(pycodestyle_section, opt) - elif opt_type in ('store_true', 'store_false'): - value = config.getboolean(pycodestyle_section, opt) - else: - value = config.get(pycodestyle_section, opt) - if normalized_opt == 'exclude': - value = normalize_paths(value, local_dir) - setattr(new_options, normalized_opt, value) - - # Third, overwrite with the command-line options - (options, __) = parser.parse_args(arglist, values=new_options) - options.doctest = options.testsuite = False - return options - - -def process_options(arglist=None, parse_argv=False, config_file=None, - parser=None): - """Process options passed either via arglist or via command line args. - - Passing in the ``config_file`` parameter allows other tools, such as flake8 - to specify their own options to be processed in pycodestyle. - """ - if not parser: - parser = get_parser() - if not parser.has_option('--config'): - group = parser.add_option_group("Configuration", description=( - "The project options are read from the [%s] section of the " - "tox.ini file or the setup.cfg file located in any parent folder " - "of the path(s) being processed. Allowed options are: %s." % - (parser.prog, ', '.join(parser.config_options)))) - group.add_option('--config', metavar='path', default=config_file, - help="user config file location") - # Don't read the command line if the module is used as a library. - if not arglist and not parse_argv: - arglist = [] - # If parse_argv is True and arglist is None, arguments are - # parsed from the command line (sys.argv) - (options, args) = parser.parse_args(arglist) - options.reporter = None - - if options.ensure_value('testsuite', False): - args.append(options.testsuite) - elif not options.ensure_value('doctest', False): - if parse_argv and not args: - if options.diff or any(os.path.exists(name) - for name in PROJECT_CONFIG): - args = ['.'] - else: - parser.error('input not specified') - options = read_config(options, args, arglist, parser) - options.reporter = parse_argv and options.quiet == 1 and FileReport - - options.filename = _parse_multi_options(options.filename) - options.exclude = normalize_paths(options.exclude) - options.select = _parse_multi_options(options.select) - options.ignore = _parse_multi_options(options.ignore) - - if options.diff: - options.reporter = DiffReport - stdin = stdin_get_value() - options.selected_lines = parse_udiff(stdin, options.filename, args[0]) - args = sorted(options.selected_lines) - - return options, args - - -def _parse_multi_options(options, split_token=','): - r"""Split and strip and discard empties. 
- - Turns the following: - - A, - B, - - into ["A", "B"] - """ - if options: - return [o.strip() for o in options.split(split_token) if o.strip()] - else: - return options - - -def _main(): - """Parse options and run checks on Python source.""" - import signal - - # Handle "Broken pipe" gracefully - try: - signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) - except AttributeError: - pass # not supported on Windows - - style_guide = StyleGuide(parse_argv=True) - options = style_guide.options - - if options.doctest or options.testsuite: - from testsuite.support import run_tests - report = run_tests(style_guide) - else: - report = style_guide.check_files() - - if options.statistics: - report.print_statistics() - - if options.benchmark: - report.print_benchmark() - - if options.testsuite and not options.quiet: - report.print_results() - - if report.total_errors: - if options.count: - sys.stderr.write(str(report.total_errors) + '\n') - sys.exit(1) - - -if __name__ == '__main__': - _main() diff --git a/src/debugpy/_vendored/pydevd/third_party/tests_cython_json.py b/src/debugpy/_vendored/pydevd/third_party/tests_cython_json.py deleted file mode 100644 index 4c191669..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/tests_cython_json.py +++ /dev/null @@ -1,101 +0,0 @@ -import Cython -from cython_json import source_to_dict -import pytest -import json - - -def test_dump_ast_error(): - as_dict = source_to_dict(u"x = [a 10]") - errors = as_dict['errors'] - assert len(errors) == 1 - error = errors[0] - assert error['__node__'] == 'CompileError' - assert error['line'] == 1 - assert error['col'] == 8 - assert 'Expected' in error['message_only'] - - -def test_dump_error(): - contents = u''' -from distutils import sysconfig -''' - if isinstance(contents, bytes): - contents = contents.decode('utf-8') - source_to_dict(contents) - -def test_dump_class(): - contents = u''' -class A:pass -''' - if isinstance(contents, bytes): - contents = contents.decode('utf-8') - source_to_dict(contents) - -def test_comp(): - contents = u''' -{i: j for i, j in a} -''' - if isinstance(contents, bytes): - contents = contents.decode('utf-8') - source_to_dict(contents) - -def test_global(): - contents = u''' -def method(): - global b - b = 10 -''' - if isinstance(contents, bytes): - contents = contents.decode('utf-8') - source_to_dict(contents) - -# def test_dump_custom(): -# with open(r'X:\cython\tests\compile\buildenv.pyx', 'r') as stream: -# contents = stream.read().decode('utf-8') -# source_to_dict(contents) - - -def test_dump_ast(): - data = source_to_dict(u"x = [a, 10]") - assert not data['errors'] - assert data['ast']['stats'] == [ - { - "__node__": "SingleAssignment", - "rhs": { - "__node__": "List", - "line": 1, - "args": [ - { - "__node__": "Name", - "line": 1, - "col": 5, - "name": "a" - }, - { - "is_c_literal": "None", - "unsigned": "", - "value": "10", - "constant_result": "10", - "__node__": "Int", - "line": 1, - "type": "long", - "col": 8, - "longness": "" - } - ], - "col": 4 - }, - "lhs": { - "__node__": "Name", - "line": 1, - "col": 0, - "name": "x" - }, - "line": 1, - "col": 4 - } - ] - - -if __name__ == '__main__': - pytest.main() diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/__init__.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/__init__.py deleted file mode 100644 index 3b927b4d..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/__init__.py +++ /dev/null @@ -1,518 +0,0 @@ -#@PydevCodeAnalysisIgnore 
-"""create and manipulate C data types in Python""" - -import os as _os, sys as _sys -from itertools import chain as _chain - -# special developer support to use ctypes from the CVS sandbox, -# without installing it -# XXX Remove this for the python core version -_magicfile = _os.path.join(_os.path.dirname(__file__), ".CTYPES_DEVEL") -if _os.path.isfile(_magicfile): - execfile(_magicfile) -del _magicfile - -__version__ = "0.9.9.6" - -from _ctypes import Union, Structure, Array -from _ctypes import _Pointer -from _ctypes import CFuncPtr as _CFuncPtr -from _ctypes import __version__ as _ctypes_version -from _ctypes import RTLD_LOCAL, RTLD_GLOBAL -from _ctypes import ArgumentError - -from struct import calcsize as _calcsize - -if __version__ != _ctypes_version: - raise Exception, ("Version number mismatch", __version__, _ctypes_version) - -if _os.name in ("nt", "ce"): - from _ctypes import FormatError - -from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \ - FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI - -""" -WINOLEAPI -> HRESULT -WINOLEAPI_(type) - -STDMETHODCALLTYPE - -STDMETHOD(name) -STDMETHOD_(type, name) - -STDAPICALLTYPE -""" - -def create_string_buffer(init, size=None): - """create_string_buffer(aString) -> character array - create_string_buffer(anInteger) -> character array - create_string_buffer(aString, anInteger) -> character array - """ - if isinstance(init, (str, unicode)): - if size is None: - size = len(init) + 1 - buftype = c_char * size - buf = buftype() - buf.value = init - return buf - elif isinstance(init, (int, long)): - buftype = c_char * init - buf = buftype() - return buf - raise TypeError, init - -def c_buffer(init, size=None): -## "deprecated, use create_string_buffer instead" -## import warnings -## warnings.warn("c_buffer is deprecated, use create_string_buffer instead", -## DeprecationWarning, stacklevel=2) - return create_string_buffer(init, size) - -_c_functype_cache = {} -def CFUNCTYPE(restype, *argtypes): - """CFUNCTYPE(restype, *argtypes) -> function prototype. 
- - restype: the result type - argtypes: a sequence specifying the argument types - - The function prototype can be called in three ways to create a - callable object: - - prototype(integer address) -> foreign function - prototype(callable) -> create and return a C callable function from callable - prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method - prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal - prototype((function name, dll object)[, paramflags]) -> foreign function exported by name - """ - try: - return _c_functype_cache[(restype, argtypes)] - except KeyError: - class CFunctionType(_CFuncPtr): - _argtypes_ = argtypes - _restype_ = restype - _flags_ = _FUNCFLAG_CDECL - _c_functype_cache[(restype, argtypes)] = CFunctionType - return CFunctionType - -if _os.name in ("nt", "ce"): - from _ctypes import LoadLibrary as _dlopen - from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL - if _os.name == "ce": - # 'ce' doesn't have the stdcall calling convention - _FUNCFLAG_STDCALL = _FUNCFLAG_CDECL - - _win_functype_cache = {} - def WINFUNCTYPE(restype, *argtypes): - # docstring set later (very similar to CFUNCTYPE.__doc__) - try: - return _win_functype_cache[(restype, argtypes)] - except KeyError: - class WinFunctionType(_CFuncPtr): - _argtypes_ = argtypes - _restype_ = restype - _flags_ = _FUNCFLAG_STDCALL - _win_functype_cache[(restype, argtypes)] = WinFunctionType - return WinFunctionType - if WINFUNCTYPE.__doc__: - WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE") - -elif _os.name == "posix": - from _ctypes import dlopen as _dlopen #@UnresolvedImport - -from _ctypes import sizeof, byref, addressof, alignment -from _ctypes import _SimpleCData - -class py_object(_SimpleCData): - _type_ = "O" - -class c_short(_SimpleCData): - _type_ = "h" - -class c_ushort(_SimpleCData): - _type_ = "H" - -class c_long(_SimpleCData): - _type_ = "l" - -class c_ulong(_SimpleCData): - _type_ = "L" - -if _calcsize("i") == _calcsize("l"): - # if int and long have the same size, make c_int an alias for c_long - c_int = c_long - c_uint = c_ulong -else: - class c_int(_SimpleCData): - _type_ = "i" - - class c_uint(_SimpleCData): - _type_ = "I" - -class c_float(_SimpleCData): - _type_ = "f" - -class c_double(_SimpleCData): - _type_ = "d" - -if _calcsize("l") == _calcsize("q"): - # if long and long long have the same size, make c_longlong an alias for c_long - c_longlong = c_long - c_ulonglong = c_ulong -else: - class c_longlong(_SimpleCData): - _type_ = "q" - - class c_ulonglong(_SimpleCData): - _type_ = "Q" - ## def from_param(cls, val): - ## return ('d', float(val), val) - ## from_param = classmethod(from_param) - -class c_ubyte(_SimpleCData): - _type_ = "B" -c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte -# backward compatibility: -##c_uchar = c_ubyte - -class c_byte(_SimpleCData): - _type_ = "b" -c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte - -class c_char(_SimpleCData): - _type_ = "c" -c_char.__ctype_le__ = c_char.__ctype_be__ = c_char - -class c_char_p(_SimpleCData): - _type_ = "z" - -class c_void_p(_SimpleCData): - _type_ = "P" -c_voidp = c_void_p # backwards compatibility (to a bug) - -# This cache maps types to pointers to them. 
-_pointer_type_cache = {} - -def POINTER(cls): - try: - return _pointer_type_cache[cls] - except KeyError: - pass - if type(cls) is str: - klass = type(_Pointer)("LP_%s" % cls, - (_Pointer,), - {}) - _pointer_type_cache[id(klass)] = klass - return klass - else: - name = "LP_%s" % cls.__name__ - klass = type(_Pointer)(name, - (_Pointer,), - {'_type_': cls}) - _pointer_type_cache[cls] = klass - return klass - -try: - from _ctypes import set_conversion_mode -except ImportError: - pass -else: - if _os.name in ("nt", "ce"): - set_conversion_mode("mbcs", "ignore") - else: - set_conversion_mode("ascii", "strict") - - class c_wchar_p(_SimpleCData): - _type_ = "Z" - - class c_wchar(_SimpleCData): - _type_ = "u" - - POINTER(c_wchar).from_param = c_wchar_p.from_param #_SimpleCData.c_wchar_p_from_param - - def create_unicode_buffer(init, size=None): - """create_unicode_buffer(aString) -> character array - create_unicode_buffer(anInteger) -> character array - create_unicode_buffer(aString, anInteger) -> character array - """ - if isinstance(init, (str, unicode)): - if size is None: - size = len(init) + 1 - buftype = c_wchar * size - buf = buftype() - buf.value = init - return buf - elif isinstance(init, (int, long)): - buftype = c_wchar * init - buf = buftype() - return buf - raise TypeError, init - -POINTER(c_char).from_param = c_char_p.from_param #_SimpleCData.c_char_p_from_param - -# XXX Deprecated -def SetPointerType(pointer, cls): - if _pointer_type_cache.get(cls, None) is not None: - raise RuntimeError, \ - "This type already exists in the cache" - if not _pointer_type_cache.has_key(id(pointer)): - raise RuntimeError, \ - "What's this???" - pointer.set_type(cls) - _pointer_type_cache[cls] = pointer - del _pointer_type_cache[id(pointer)] - - -def pointer(inst): - return POINTER(type(inst))(inst) - -# XXX Deprecated -def ARRAY(typ, len): - return typ * len - -################################################################ - - -class CDLL(object): - """An instance of this class represents a loaded dll/shared - library, exporting functions using the standard C calling - convention (named 'cdecl' on Windows). - - The exported functions can be accessed as attributes, or by - indexing with the function name. Examples: - - .qsort -> callable object - ['qsort'] -> callable object - - Calling the functions releases the Python GIL during the call and - reaquires it afterwards. - """ - class _FuncPtr(_CFuncPtr): - _flags_ = _FUNCFLAG_CDECL - _restype_ = c_int # default, can be overridden in instances - - def __init__(self, name, mode=RTLD_LOCAL, handle=None): - self._name = name - if handle is None: - self._handle = _dlopen(self._name, mode) - else: - self._handle = handle - - def __repr__(self): - return "<%s '%s', handle %x at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle & (_sys.maxint * 2 + 1)), - id(self)) - - def __getattr__(self, name): - if name.startswith('__') and name.endswith('__'): - raise AttributeError, name - return self.__getitem__(name) - - def __getitem__(self, name_or_ordinal): - func = self._FuncPtr((name_or_ordinal, self)) - if not isinstance(name_or_ordinal, (int, long)): - func.__name__ = name_or_ordinal - setattr(self, name_or_ordinal, func) - return func - -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. 
- """ - class _FuncPtr(_CFuncPtr): - _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI - _restype_ = c_int # default, can be overridden in instances - -if _os.name in ("nt", "ce"): - - class WinDLL(CDLL): - """This class represents a dll exporting functions using the - Windows stdcall calling convention. - """ - class _FuncPtr(_CFuncPtr): - _flags_ = _FUNCFLAG_STDCALL - _restype_ = c_int # default, can be overridden in instances - - # XXX Hm, what about HRESULT as normal parameter? - # Mustn't it derive from c_long then? - from _ctypes import _check_HRESULT, _SimpleCData - class HRESULT(_SimpleCData): - _type_ = "l" - # _check_retval_ is called with the function's result when it - # is used as restype. It checks for the FAILED bit, and - # raises a WindowsError if it is set. - # - # The _check_retval_ method is implemented in C, so that the - # method definition itself is not included in the traceback - # when it raises an error - that is what we want (and Python - # doesn't have a way to raise an exception in the caller's - # frame). - _check_retval_ = _check_HRESULT - - class OleDLL(CDLL): - """This class represents a dll exporting functions using the - Windows stdcall calling convention, and returning HRESULT. - HRESULT error values are automatically raised as WindowsError - exceptions. - """ - class _FuncPtr(_CFuncPtr): - _flags_ = _FUNCFLAG_STDCALL - _restype_ = HRESULT - -class LibraryLoader(object): - def __init__(self, dlltype): - self._dlltype = dlltype - - def __getattr__(self, name): - if name[0] == '_': - raise AttributeError(name) - dll = self._dlltype(name) - setattr(self, name, dll) - return dll - - def __getitem__(self, name): - return getattr(self, name) - - def LoadLibrary(self, name): - return self._dlltype(name) - -cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - - -if _os.name in ("nt", "ce"): - windll = LibraryLoader(WinDLL) - oledll = LibraryLoader(OleDLL) - - if _os.name == "nt": - GetLastError = windll.kernel32.GetLastError - else: - GetLastError = windll.coredll.GetLastError - - def WinError(code=None, descr=None): - if code is None: - code = GetLastError() - if descr is None: - descr = FormatError(code).strip() - return WindowsError(code, descr) - -_pointer_type_cache[None] = c_void_p - -if sizeof(c_uint) == sizeof(c_void_p): - c_size_t = c_uint -elif sizeof(c_ulong) == sizeof(c_void_p): - c_size_t = c_ulong - -# functions - -from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr - -## void *memmove(void *, const void *, size_t); -memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr) - -## void *memset(void *, int, size_t) -memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr) - -def PYFUNCTYPE(restype, *argtypes): - class CFunctionType(_CFuncPtr): - _argtypes_ = argtypes - _restype_ = restype - _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI - return CFunctionType -_cast = PYFUNCTYPE(py_object, c_void_p, py_object)(_cast_addr) - -def cast(obj, typ): - result = _cast(obj, typ) - result.__keepref = obj - return result - -_string_at = CFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr) -def string_at(ptr, size=0): - """string_at(addr[, size]) -> string - - Return the string at addr.""" - return _string_at(ptr, size) - -try: - from _ctypes import _wstring_at_addr -except 
-    pass
-else:
-    _wstring_at = CFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
-    def wstring_at(ptr, size=0):
-        """wstring_at(addr[, size]) -> string
-
-        Return the string at addr."""
-        return _wstring_at(ptr, size)
-
-
-if _os.name == "nt": # COM stuff
-    def DllGetClassObject(rclsid, riid, ppv):
-        # First ask ctypes.com.server than comtypes.server for the
-        # class object.
-
-        # trick py2exe by doing dynamic imports
-        result = -2147221231 # CLASS_E_CLASSNOTAVAILABLE
-        try:
-            ctcom = __import__("ctypes.com.server", globals(), locals(), ['*'])
-        except ImportError:
-            pass
-        else:
-            result = ctcom.DllGetClassObject(rclsid, riid, ppv)
-
-        if result == -2147221231: # CLASS_E_CLASSNOTAVAILABLE
-            try:
-                ccom = __import__("comtypes.server", globals(), locals(), ['*'])
-            except ImportError:
-                pass
-            else:
-                result = ccom.DllGetClassObject(rclsid, riid, ppv)
-
-        return result
-
-    def DllCanUnloadNow():
-        # First ask ctypes.com.server than comtypes.server if we can unload or not.
-        # trick py2exe by doing dynamic imports
-        result = 0 # S_OK
-        try:
-            ctcom = __import__("ctypes.com.server", globals(), locals(), ['*'])
-        except ImportError:
-            pass
-        else:
-            result = ctcom.DllCanUnloadNow()
-            if result != 0: # != S_OK
-                return result
-
-        try:
-            ccom = __import__("comtypes.server", globals(), locals(), ['*'])
-        except ImportError:
-            return result
-        try:
-            return ccom.DllCanUnloadNow()
-        except AttributeError:
-            pass
-        return result
-
-from ctypes._endian import BigEndianStructure, LittleEndianStructure
-
-# Fill in specifically-sized types
-c_int8 = c_byte
-c_uint8 = c_ubyte
-for kind in [c_short, c_int, c_long, c_longlong]:
-    if sizeof(kind) == 2: c_int16 = kind
-    elif sizeof(kind) == 4: c_int32 = kind
-    elif sizeof(kind) == 8: c_int64 = kind
-for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
-    if sizeof(kind) == 2: c_uint16 = kind
-    elif sizeof(kind) == 4: c_uint32 = kind
-    elif sizeof(kind) == 8: c_uint64 = kind
-del(kind)
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_ctypes.dll b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_ctypes.dll
deleted file mode 100644
index 238e869a5a0494d85428a913ece01fff9eeebdf1..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 287417
[base85-encoded binary patch data for _ctypes.dll omitted]
zH*hTYn!xekxq%bGHwDfI-yFCQd{f{e@Xc~RGXF{6=CEw|=D;n`nHQJ`zB4c%JU_4y zd{^K(@cou;&kC4!wvYQJbo#mF#6TOWgu^w$+~A?1p9OVgGM&)8Lh^Ij?4w+Y0ix-LBww+`izq-TvTr z+yUUV?hx=N?(yKy+{?hPxOalz4=e*?5K#Y)$lhx3lx2=aAq zGw?@lXYe|=C-@_`H+a2!5cq5NL}E1;H#NYIZBO-HPD~9fqxyM2pYz13|8uH5XJDk1 zPYXOt@^Qq~RL9Cd(vQ3kQ>FYWDOdfIs{?GRGv&Rr1B>WhR?X8jmM--k>c&8bm|dPO zUFQ^tU!;p&fiA8Gu9E(%eggd|^>v+4puf57-y)xNoV$RYwNUc$?h5O)&OEWX<2*-vfr%xv2$Yl(}tf7WN4-{!nCvSYdF zlg}+N>*#JOF@4vYOWG@Sd2dJXEhVOpacjwbkk2bI^B#AWm_Ek*lF`t)tz-;%e#u$j zJ4*B#d-~p13B5`e*=bz2%qih-=p02%@AqX&eJ^c$Zpmhnt8-Sb_EXT*C+uy3t{DvE zI6ayFkm-3(Nz*HJdfsiN=iOF%>eUsTRtv23yvIt<1y*|AW2NT;D?RToX~qE{$NscX zNc=!aEAS&FUBL@WdV?P==@0&A$w2U*7F+4Fq@=SBYeR9TZz9NeDZAp8 z=gQ?Z(C-3W+E&Ya?k3ai^^(*qZC5L;bX#eq+w1U?&e*roYE6kLUvHJ{L;kX(^I=N5 zDJ1?>=I=0>zi-GekCwd3%IlTzKkwhizY266$3Tw5%zTmz^LHgVg!x-5%->pJ{tkZ9 zH@2)W|7wN#M=Q*~TVZyBY*#AG-&ChqAgl{BA>z0H3Y<6^pe9(+HD}(zg zG0EEodx1O1_^LRzu;SQkhjBd0zn7oG{Fsbm@8D|0v6mIcURE4?TXF1d#j%eS$33mJ z2ZMs9Ozq`#N0^&9rtTdc%tGGO*&lpB@DT8j;3)8cLDQ}c4VpT6SnwF=j0kFbrn$1< zRLJ`WuOs&7uVnh@cPP~LlP*s`dAz?PIV%qnzcrT;Q!^}Hl=(Xd$^UqqpbWLW##Wbto%J9Xy!wX4w`<; z5y8&%EOvAbbI|1V(ZP`i9G<3h#ct-ZKPmVl(&~5_ z9@P&#-U`noD?F2f4&BQxOZS?vO_uj+x#ah{G{`o zk#|{fh84#%gJJShjU`WoK0k}&dx(72xxs2Y>m2J@=UC4=7x&Wn&z9|-XW8ELV3Pb~ zNAq%4U3#9(UnQSz+2{E(JyqDwLHIdro#;A2hV9B=4q>~(%Ihnvuw7||ZMK!yS6b;X zTc(2w!)699i^&1GmjC}H)yVs z>8bi}S0H`zz90OnK-W43a-8POcalF%MUB*dUGmwo=zL0CE>3ksu=SJ_O-z=H(%@LP z^^~rX_X-w>$4c%LEI6&FTq1d7Fh~4|lx^pKdqvfHlK<@$Rq3!h_lo{E>vMjk za$P3s$a3x7%I()4a{r@hulJC9wdS9$@7uZ!PWwSITdi@Sm)chrBiItWu8mQLY6y5?_NG%lF{?CqGwD zI%KKl&QSfQ(xF^Wh06FgCHV*$-}YkRHMG7+#*Rg z2d8oz2ClQ&aTq2kl<~R@+*#ZQ?k*kzcNPo5-Nn=3F8CH(R&*E7Sn@o$yI2nHEmnc| z6YqojSo=V$#m|uU6@P>KiwLC;J35D4#KHZs%aHe4>Xlsd?Gj>)2>s(uvcmtJ|q<%0Qg(l4VuN4e{CpZ`3oTufIt($6gEm~xS`%Ebkuxvxd< zWTpM_pY%--Q33f(Sq7E-V%oW@T$W19Xz*p$-r-9H&lRxCipvC#8P#~i5z@N9~WkzWIzEqL6jzSlfUN>$jVBW!t+ z>a;2l=jbY?qy1zZ_LCjQi~EX;WxN)O8j9Bq6ww;0K|GRwOmb!}zYAqu`iL-fsV-fd zZp(ykKEw+VHacI|+DpF3+DpEWYG38D;wft{`AcMRTK+24j+$Q=`+{G!_K&}A?H_;D z+H3zh?VQ&4zKL;hmU?uDTXOCLvZM8HR=xVBRnM#zla&hTyd_Qn|4W<)ep_4)e#hFY zyWZNLJI*nE@6(;fp})?mpFb3DK>m^V0Gtz_f@fNF%f(jx{JGHUl(JMSnfhUawU>OO zIL6N;womGDOULEN)D<(Wz2p}Q-aEi9D}E8*fPa;Hz?J+r5p$&q>HID-;6H>p5AZi( z)`|Tg%sR16v?5I3`={s*{!5GlZx(aGTg6&%!ZGirlg^)zryOm3ymvd`PxjHAq!R(} z;nV}yb@Z1mSclJIVMl!frx|#KidK8g+Ek97_Lk8=(IJI+{esdF6IbxsBcoN3?^XF53OTm&w4 zt^|jiYr&qg033220ej99;E3}U_($;(_+s%RxRJ97+}P2RHS}#Yr@7w-rz+;^Az+;_@!Ht}|z+Z_+!QGu@;4#kk#Ore020kYDGH;~zi_wg~-PRO{xveSIAJFwb>u}w^JD$t+aIt6U z`jel?hYzyTeai&#<3dClpI7T^?D7 z%j52Nt}fT?Czs#d;ioS9>?fDm-QlM$@9gIve%ZIV%$KjBa?upMKs5(=<2*ArSVR0h z=^R7)%>SDkq=X>%zkPi4y{g>*&XHS6w($SsIdVI@k8iVV9~*JADcaHGeoi!&JVA03 z$#W$)mHe9IW|GD2tlwPnzLHx=K3Q^^SFT`ifs<7)nd?6b7}=B3Q~}d3IS* zD#NMREA^{Sl1EC-99_LqGd8bZ+CkpQ_tq;l^A&rQ?g#mvrG3DAmG%WUC>;Rat8^eZ zQEKM&qNV1YSEBSt(uv=}?|?$3)g4+-X-Zrzyiy(pseK1{?I!ZRfBdUJ*9i>dxc+Bu zNB8m^Y=_(Ej8ozo(L(0Mp+0kb$4llMqq8S`u0uS_=LW>{B)6330Y2b!Q<5+7nVXENRR+wnZNSq=StML%$Nx?efkD`d^DjuMAL{tv-Oa&io&4nH!E`vh^4Scou6Fgh|8+@%buX3FTxjK&53;i*qETy>Fe|Mv31f82j6Yz!dEOfq`(`p3d zgTyFsA29}eyF3qE>DP!WAfGR82Hz#_2Hzu|2Hz*10WT0Mz*mYj;0Hwk{E+w#{D>&= z3jpl_5h3tnf_6knmlekgGY=CfZ3DSi+7TQn)oZP}j_M0}{nD}EdZovM8)j+B}`2=z+mLSDagA-GVmx>ad4)sbsaKlnF|8t6{1No_9Pw@GoCHN811$>Vf2u_s#0~{?K3r>`pxua<5 zEK7bPI8you*ef;jMUhgqx|40MUg<+WJ5l~Mi8x0qiFxjV1$nx3JzOB>@&2?L);I4X@+8xBQGr;vbQXNg|J_e6 zce?%6orXN!s>@)3n9EKv9bJxChwZO>&NbKe zT(<3E&t*Bg0$um)K!@e*3UobK3zzviGi$h?_KYaZr*}`pHDZbEr_J|S_DiQJx$Y@v z@z$2>kY}uZa^8B6{37+BmE-qNeF+zmpVj0)nZJ0>XKsGgH<6bRDaT>!PuI#C4%;iD z!(BRT%dN02m(NoD{N+}E{uSj3{mjz6y6o_|rZO&4{-&k>7S8L)iq|N7%F+HZtIzfp 
z`K80Z+zS74;ddWozjuGq`M8F|{ISR(%5>T~_>RJ@!#}M2+U&1b;V}GVjUzUThVXNX*b5xD&Vma% z)s}oTv0D50o0N8?FjxC4MbvtLKV-c6N%QCwn6Iu3ulIPIZBD2YsM_U zIlUqO-RVnw0>v>Wev$L}s$Tj@&iS8C@@k4ZHK)?${ogq$A1HNL#B!Yu1!9g%F&!N* z*5SAm)6wB%9UV?)z2}t?705DdH*;6D$u2EgX6_~ zH~CX0f^Ie9Rie)2kTzK&&!$juEOCuZVreP3yBhIoVA*5?%O)FGHrY^{JjI3952$Dn4|7aAHQcKI zhuQmdoIcP$$Qb}`;tm2gc87qQSZA#?cFp`=6Za74400BM2RrM)jog2Oo4Z@UO)R@@ z>ehGEKWKfUWy8(gO2~&>XL2@jvyfAmiB%oDr#u(s$2-}!_i}%LZ8wm%%}>ay#}us_ zbs82(htr{0{i)-~IvlTJI{J(Y*3sc)KFAL<*G0^mD4aB(>2|nZ+jF^<@8wp$ms|O+ zPn2LAZe^XTGsvowTe)d^20ONmItrz2m^zly(~|c954Y;BgPeYlw{?x3w6pA_t$P4; z+PQ~<2RToJ2Rk2vt6VwArN6VHlk50tk`?VNd+6Zq0eO439=L@NjTlQT6N>TR^8a$ zHSa0*wd%(1uCD*HqK9khoSrWC!CK0+?dJYPWl+_PU0f0iRX29oVcnQzC2|}W=Jql! zecS@#vY!=~{j9j?)8YBPepX!iSaIoR#ihShrutd&>hJE!0U*cznECnsZWARYdDb=c z=K!mo%v$z8z#T{WDjlk=bWrDMsC3v*Pl2Oe;ztzdI*x%Hrvr0enGS>95&zVAI>@Rk z23d8*AS+&jWty`;+{e)M8?Ws#`8D2ZBgR>6#CWTXIL2xthFfKSm=*WoRvR(QY9oeQ zZNzA+jX2V3BZgXS#7L`+7;e=GBdj*!AghfSX|)mKtu|t))kaX5sa&W&&jGURkD&C- zi6K_mAM_t>M6te#PQwDFDYp^Dbhd9Jis}5fZNw;*aC+qej zzF4O1aaKJ!Q9eu6lM{E?Ug-WR*OSb%$sanK&K--mOtIoJ#fr6Tp{u4u349UhC7kuD!x;#_)f9nJ7tIMf?m7M z@nybK#`kP@HR5}gRX?0%)emP`;Xm7o``JpL{Nlc1lzy}0aYQSH#KWyRVcYRVXXqSc z)eq;py&<3BR)f#C(r1Rdza`iC$YHn(^1;@*uhZRcA-~Z59(=z03pnTg4!*!@Q!aEp zl=tD*9L{vB9Xa1^Mmnm0c#g~aWYxI%EZ9cg_gnYPLjK<@*XdRu=C~Kr(eYv(j!Q8e zJ(g!3J(g$I;~>s==7lofFLA{}U1l$~^8I2f-!HcEo%DT2duMi-?^VzlZk-i8%rRwl zxU(;G4zlJquKYjjy$P6{S8*=o+PS*n*WTW;g{3bz*c{bgGWf8(QU>)~s z-MGK>n8$r1`oGkeMj988&UNlmd1Bv~C-#l-)uEppcfuc)aldKV;lGUI{#qGFje)$@ z>9_cl1?Tcj%StHE5AL%vy83Y%AjLpl?_wZtaK6DGEL*8XQTC0?l5G6JvNM4HEc&w{ zd!5LY`uVRFSoQX6kE*w$|4F(Ce--tt$ZlRXZ*;xc(e-9W*UeHtyVQ?c;SyapJG$QD z==vj9_K%mXW2{NpFujy?vL{ z+jlP8fPV6WJuj!X?{xF}F6ZMt+v)A+IK6#`)7y7Dy?vL{+n;cH`;$&@-|h7Fu+!Uj zIK6$B=xz1WKI-Q0yvf77P?3D32h!WokMen1_4ZPE9<1KJU&j63WdrYy<9?4D_j}y9 z-{Z#pUN`Rd{uhk<=iRv9w`@b;S3llXnEaXh+_-=KL5%z7+_-<)jr-@_xPQTo`xlq} z+}izuTLXS^*^`0}#Jyh2(tCWq?ELxRLWkw~+_H3+Ecy?;%EqUiY~1sIR5oJY7C>L@ z8{uuC&(sNo9|b{Mk$r91k-v(v@l_dX)$Lz(dMw`P%DMTqWl0Bq-RQ!euhW5Fce;JS z>Gp3p-TptFZvTep_TNE&u;(kXr@HQ|0;_KS>VwwptAws^FIzCW{=w1p500*HJ30T3 zqwCv_uJ1Uy{>hd7XQ$i$$?5igcDnt$PPhMy)9v3C-LCZhqtosG;OPB_2d&%tgx()6 z+wh(^jz4g7{0DB1|G>@hcrz)-^oMSa|7ZE+KXG&X$ICWA=J?@#?3)w>{9`xAf8yr& z(R}hBxjFtbH^+bC=J>z6IsQ|p+kfVCyZ6a|p9A8X6d8wFu@^Uf7=O0o`KKXIQStBBU&Yb&ct_V099_Td%097}eDdEeCZGI?#pIKpTueUslZweF zKe{xPbsEY$|)uf^P%rc`O>X=d#OAaD)}tn zf6}s4o(JobzfQ*ejN<(JqnxjH!!CgXl)al;4WxIa_IQRnzGosK%wjrEyMzdg(7 z>d!Vf{q`)Uw>LPwz0v9Ivz^}FD0=&S=nvk@BW_eF*M7Gz zcEHW$Lxp}a8K3^_!s0m!0zOnY&)^FkuP!R4a}*aAFSR@u6)!V*rm$e}RqkD8dy3Bp z1`p49726EHsMurh#l?*VUsOED;ERjr8GK1`kHMD~hYdbZm@#;dd&kN}#TNqqEsVwC zY*&$Y9eiKl!6NT!&{+CF@geB5%JV?H%Ej#RPmq5@Hk~_Tc0!<$w9@}sDv#eUBX5_= z6Zy<~=u0e>hiUvQ;Ez09Do>=Hd44zWf$;SKUJ>B`04*!B%ZoD~in`@8CzF>snY_%& z2MT>w_E2G?!6U^ahldJD4o8ZSKX@LnnD(tk zWS+7(@ku5}ikDlRqs1YEYsFs~JX$OSp5VE{;zJCcaqm06s*s+=8!pDX#c=O#@iUgc zR{XNTW5xer@ObfegCmRgMHXLd@Obe$gU5cu}c{DHzd4IVCj z$lzM># z<%zT~Po#zLJ%O$j0e&)QS%Ej)ZYYqpIzjkhWA;OdQn~91Qi47Q(BAmi@(fqh-#~ zGUsU7?`YZY=GsA*=aAF?2j%@`Yz}(6coG48_};PQ0xAA!247u#zrlx!e{Jy9#g7|& zsCbvbR~J8N@N?aKd0z1`7+>Yj0g*lB&)iY{fq4FbJRy9R@Z@lD;UjT;U*L4n3!E-` zfzw5Y9e)lx{=C@P$HR_4FLw6vI%glRcY5hMXCJS3_VEU1A7AS1<26n%-B8$O{JF;I zsFyk&b&a!+FLw6vCC)y+#M#H|oPE5;*~e?0eZ0oKr|(*4D_`d9<8{TMFdzG238Ia_ z9fr<6UL*E#JI3O0_CguQiv)g=(;+W#_VERa^hp)@H{c0p+E9Mx@#pmk?c-8;TJ7Uf zc_MEYPzUw@QhAuh&jS8vAD7A#X=k1XvX6T~%Zlvf#hH&qnY_Wt@m0<~-r(%xtDGFZ+R5Ilob0{Y*~eEo`}k^SA7A6v z*ROW#>({vVMZUTil^yQ)c6#8&g_oF&y`=DVgI`(vl)2mRll zz1G>#n~E=R_!k*`Q}HDRzqWX-!8bYE`nuw44FCG#5reNUyxrhe7T;s=Yl?pjSm(kk 
zWG-ASbK&>hT)5$=@*jm@9x~?TzaPJ&O!_>n_H?N{k+%z|gZx}7Po#x;A}xgP2z0Fo z@HHaaZz^uMGs^ZGoeg@UvoCLo-Gm)^le4F9cJ}mUXHVbk?CH(Uo*r@b^etjfmA8NB z?CBe&Ew!g_j3OpF1l^X;0{(Bcr>_-SZY|CmEw?yYZgI5S>S(#u&CR#DJa2dQ;%&~J zzTMf=w-uL5Z)i{7UVNItw-w)K@Y{%qdyUQs{DDYv!}N_ zXnXoL;mLc83wOoweUH;!?{T{8Jx+JM*YW4QEvpDmK$cHwomPK;_6a8^pLFtYcadkU@0EVuRpfgL)aSY5sAKdL z*Ehm{Cw;rOc*Od4j~kC#^_$x&wJf`{H%+Ce9q0suNGs>9Q&}&Mtrpx?P)*W zmY3qy3(h~eugG(N{PbsEl=r&oxP01;%RSN`9hZ9+=|h%Xz~2o7@Zq=+{;tsetzz3J z{n@vyt^Vvg$~V-D_oqAg{Eob1SI6a>RuB8I9DnyqAHH84 zus(dx@&9{{|KD@`|Gw4JpZ%+wBmc|kx}UoE($9)rXp0{_4_iE2LBRjZ@%-PM{QpGc zU;F$$$J_6jOtSx!^Am0*KYvwRu>RlgczD0#VZ70meaK1=u|5>syW5tPq7T-e735uk zC!_ui*?&kMv_JP7?en1yel6fHuA$gB!q1StJ*1S~9mlt-vNI;nSaOEhRZW&&m1urBf}>1*P;X z&-T*U4&QI^d8Km<-dWmZ@PK3kL71uoclLQTbGv(TU(bo9$w~nc$wqjVUg=hD`-r6bnXHLk5|TwB*# z+5YU>Qi>tp;PlKZoSwO{L|=;^zTfsK3Icw8>FEZ)&W-i!OQ#wB4W%}NU+>;u`Ib_A z#+mP>JxAA<2z>j&;O~jzkgOb19|zuU21L~z0FjU$^GQB@~shhzIXD7KDEwWAUj{+r4ot?sjwGZZ{V`<@o<8Hy7@8 zc|KE0cc*+>>^|GZ+1Jw3LMQsOFF2j^mC`MS|7z)-27jrP)~8=~`sb^qyOHNt==0(1 zZ%Zk@efJ{yWK|3J`y2s$NH5_>yp`}m>BLV*dcW>u@9R$XzV2ji!O{LLCwmLd7Jtjh z;kQdqV59i-XaC@Qp?`G##&?|E{GQVTKPs)(qA2@gXHS3Pe8qn$4O*V>yLS-(Yv};+ zPXdh_vTukCsGR&y>A%k9uM4blqVOuo|8?^*=0n~5TEO3P3E)E>5?&`f{CVj{zka9WA*@u>&XL*hg?066x6@gEB5p zD8KVFQMON#_crQSoK)soh98_OD=S3!iRI+OKi%ay)xAG-WjW33rEq&ATV5_BF?dR(9iB|s^RuA9p`%uHLasBLc?=!79 z8R#j;GimVo%a>T$!E#z#?Jf@)eouMS;EUXQBrkOD$-TJTv^Ur2KmZ zUs`^h!IzZZVDP2oHyV6-`7H)l%V3jy;QN*54IV3h(%^deQwFau-)HcdW%^P4^k6`^wu5er7qHPpFqKHT>FgI;+rL-e>r>@}$A_^0dKcl@Ay^QvL&j zhstj;c)0uygGb7LZSb?pA2WEQoZ|39<<9}H^J2Bg>fb>LZOC?(j{|{M$s8FdKSasR z=J6fJcL6YY<@3L6iTsg2%uoLOy8MwY=KosgGv%D{>rg-Tf6Mcqi*ss4;vHOebl5Fb1nTW(Vj@cuI6Id2tur-W}hHz%y!`H+`i z3;2sVle7?ir_l1ga&}*&<#sn`Z+CO{cGpkj51;<*1CEyWx%bk3pnS6B`EdEk2G5tD zVels$EuVBc|L$^HV}8)R-~6NQov?S7lTQ9v`88JOo#j6R{9)nA`yEei7yhe^-JX-B zP*nKym*EHbL--TIpL@&m#-DrKSl{Dlzt{2P)6y=h!h5w`p3gdc`+28t?<+57W_;jB zxpBYG>Dzm(e)f&}_Dks7^6dPFu-x|q-Y#(IZN%>o_+)_x1nv}A-@bFcz=MJx5m?{E z^IU;16#T0NzDVFd5%>~;KPvF00{^|hmka!F0zXP-%R}GJJIwFPt~hShRHwjSEFS^P zzK-JWpTN(D2l$78pMyNa6XqHtu7{}8595!Y7vt|R0erZ=JxlkzT^L9*=ueOA7N2;qwAq0sapHd}&WHdv}0O+lcck0sbr`D3+yHxCH?-*7-$D zo@#*a$7XiSvk?RI!^di#zeIrPbtftOf%^*Cy94|maN-~HRK@qB{xbu-c_r2!PgDG} z9*Ji-1APB%mQ|jv_{+O7b^#`p-*s^LRL%2-!~gF+6da@fKfZ_J!MvlkiFu77l+XJG z*2wHVfLCOf$bA0>;3L_^+3_J?Kk)H`dw4QTSNRYxiUz+3r~J>h`vhN#Lur3U4;8mLJrnB;_6(JrS-_Oq&ze79R#O`61-bJty3x) zrkZ?+-(+wl+iLK$vTX)$&L$1sVeu2(9bx>!of6rLEYCppQiCtZZZLR9_WK5pWv?{2 zmc82Gz2L8YaDFfQBZK#5w-|h7_6~#BitXc1#458-7`}$vd9&>J+sIo?XndY5@Cq3h zeUI1*J7^EPwvEfqQ_OwKV_BL9pX>M;_w#m z?Qo_ms}BnN1s2UdE%4^35=@rAC3wk_?S>#9&aROkR|>57n+1N8{J34_71b&CWKNXn&#W{33oK5!mO>m~`fDeZV>0JVEm;7H5c!$705Ew=l zpW>f0|A4?J34DRTrwhDG;AaUuDDZB9cMDt>_(FmCU+j;!zw` xrE4-h`JSN#FP z??d`wsppaSrJq>OO%DGKf#Cq)GcWLC1de_JeTgp$UUkCmzaU*O=J;GG@JfOCU#$Oy zf``+A&rJdgn5}*%%j&0$NgUx05`J9ZX({l#0yhMHt;_TAze@Q(@Ls|Nk)O{9 ztZkisd&+a2gP9up{BDTF3ed&5{As`&@GIFPrI-2pv$pIw0{HYJ*TWP9+?zed;6BqF z{n$->hT%7u?@N4I2W@cnrIMXv`OnL)FnD{0=_DV%7iPlXZJBb2{n?o*hY0V=QtY}b zORe6k_3>)~e=j0{&kFnz{zu`@71_)eR!;o_@DbSJ(CYVeSqVdJ||RDwv$70zDo&7NfW>)BHb z-skLi9qSYQ^k-MfdWyerzgD)|@RQkEgRjiG4ZaF*7Sa#L+KR{aC@B zVtL+_J=NeByLTA9C0lR!TiiR}Z_T3C?}yhC?}A{TFR=W**%b!&Whs_*X4WwLhU{eq zpXuItgtvtlzLLGp;KSL?2Hz_0LV6}-^)UQKzLWG^frsTiCOTh6WbO|L{yFjvlxG9p z5F%D%0$*-=V?OY5sks*AY5{qs@V8W+*N5`VL;f$7C-R4RA}xg97wF>rjqoVom0wCe zOdH@P`b<3G^YHyl{PFXD^V&SFeU|cJJ__E@hoNskn7%=)T_!%-Wx~-e6Q*7Ew)I2c z|BCE1;s1SrkH7<^Uj0UZPXNqMKk~#6;iNO$oX+f!vm%3_d$! 
zjFBJQ`j(uC-|rz;%_y0B73L7(-~X``Sin@CS5ue zJVe$x;oj-io84vkD^9O=$vfRtSLl0@evGl(084Wm@)!Sx@EKVV@QHvAXKO^aJX!E7 zvt^;q`S5+3;LpnGkT_lo$U}ZEm8TiXGY{>{QhC&CR{I`lVg9eRX;e_0Z$>qw>e+v6;9r6vpvxM>{S?pl>b_QJB8=_WsI0YKHPx6 zXA2+h$c`8vJ|cXW0R9MOKj-GP0)I@-rQ9U&$L;+u{n;JJp&!DO2Y&djoJS}K_+BZi z_)j<=@(z2a0P{b=?R z2L5j4)QE{M5o4gI5$1uTFRO{jPG)YMov<%ksCmyP(_M^UIY&(v7DV_E`Q(A^D0ug)|nu zh2$&t6{?p1^umzAk132A{Mf=v41QeUdV`-{c$LAY7G7iU(+jTytbR*L-nY)AdH#n_ zx(b_0<_Rrt`pU z#veZm_#+RO$`kp?Jdsy~$*bSAKaq#5=NA0&vw*({lWxLY;K>TUmGkdcPHh+Y+$FO5 zi@&Da4vHLJCivZj6Oq3z@SehB0rP|B-;G|xJdL-+KSZ-g9{L0PFwesk1bkWHi3VR@ zSZ;8&aH_$>h1Gzci*`3;yQD3hzk@QSzbE+J0>4h+Jp!{hX{6n`4S4K_7TQ4L9e|Hy zbw}fsLgUv2zt7P);b@FI&FufA$QbK~?QnDU`GvIBd!d_~FDgW@2l~Gd?I7-Jm1o&B zCOIW^{gd>+F8x;;uM~d%Oz^r|P#PzU=kx4H_$=VNH^8KwaCx4z;~7TKuD;oILi;Je zAIYwBdL};cPP(plWA}SP<7(u`**s}i?f!K+K6AW!rLDIYj-V}-pIfA@ozm8=(w5qq zw+f#x75v*ApW{<-?8Dm&$DvMs`m;YPBwOg>{5ffvdawa&Mr52eKut9S=p$;8?$kP&(5wicyl&k@VVKv!CSM2 z!P~PrgLh;H4c?huZSV!z^9TFx*$WN6D0{KN7iZTPd`b2)gP)UKZ}4T=%MGq( zzi;pr*{cj5%3fpeXm*pqwd@TBk7sW*cyIP*gX`H5gD0{-GI%n()!^yuZ3fR|?=bkP z>`x7DW`Az*T=tg+AIRQq@WJf81|Q1aXYli~4;uWt+20uaf^6R4!`a6SeqnZp!7s_~ zGWez0-3DKq{jI?-%RX)Jb=l_(zA^i}!LQ7|Xz;7DFB$yCY{B3+W#2IP=IomWAIZLL z@E>L0G5FT(pACL%_C15&mVMvgcVz!+@LyyeE{?B@o*FZ-3jFos`^bLT@@ z(cr(y$_CG8#~J*w?4bsKJUhYQJF`a`d{_2pgFlh|mcgIO9%u01W=}Bq-t376e>!`z z!Jo-aG5EghDF%N&TVe1Qvr`TJQnt$AFK4G4{FUqsgTI=!8T>!84uij*bs4;ntuy$W z*?NP&l|9qoZ)ay2{Eyj2gTIq)GWfgM7K8sK+iLLlv-1u9LAJx-A7%pv|68`p;2&js z4E{-Wk-`6-U25=8v&#(rkF08NRv0q4P#8A&xWb6R#}`HoerRFL;D;6J20y$oVekos zX@eh8m^Jv3h5ZIUs&L5Q6ARBX_|b(I8vK~TOAP+4!nFoJws4)nk1O0@@Z$@wF!-dx zD-C`^;nfEJcHt(2pICUk!6z5qXz-H?e`xTN3r7t8ox&d(d`jU~gP&4(o54>l+-C68 z3hyxZ>4iTr_|(Fm8N9Oa7Y45?{H4LC7v62~>cV>rUQ@W;;I)PK8{A&_puru54;kE9 zm^Zkq@KJ-i3wIdYQ@G3E^@Y0)-ctCC!RHh{Yw)>+`wZS%_`JdA6~1Kf`GvnXcw6CX z25&EX!{8l-ZyCI^@EwB(3g0#Og2E3B-c|Tt1`if~Z1C>FPYvEv_=Uk27VbCrqC)ny zsK+iYEHn6$LfPO;3&$D!?83tgeoo;8gD)#Q%HV3@(FR{pc&x!gg~u5@TzI^}qlJ?U zt`(kW@Oa@#2JbDLV(^uPrx?7iu)^TU!l?#N6;>HMQ#jq=s|srjo-MQom_E>d>8Co< zJTy~ltQpDHjvN{vovrV$tsPlAIX+vfO%2y($Jc6l_&TF)&3gRnUYj+h=VnK0Yg6&@ zk&%w-$n@k)eWEtHCaX_1dz&lUR=2M@b5_3k&gf{btgG~Nc2|1(3|!ZjjqDwom8>0_ zwPRIZTVHzzQYNOS#+S#6IyJR()ln;&n`+d@r)r~Cq@qPCsL+4#t8<-wLNkjBcWL3S z2U9rL(e71eZfJB@`bc&+O1JyA4eo2R9(W-1Mk-v?rFE|6b&LAd?&~qQFKg_bo^2XW zdRRw$xAd+DyzJ~)*WJ^{|G{w0x~SdtuFQJ0+t=OI)!BvY%5EFL_BLkhk-7o*ozdRW zqa$fmro0dg%iGRMJGxNi*u?Zu5S$4N<4kA)P*DVb1u1;+?R_mL_7$F%9 zcFiv89|i%LI)twM)AdoM3T$R?*00K{)#17NM6*6s-9J>Fs5g`&9boTi$kBleXHQR0 z=%fjJJOfQ;Yv&rXYwJ@Z6LX`rwKIpBd#9&5)>PIG2()HzKE2T#txrefM16QIX7Jj2 zcTe{jGb3j}7aXWht?gV>LHSrCN{rUVP^3}6x>jYUIyt}5rOGO;`@vQ-G&_!Js@3tS zxe-TWB~%AWTl#vav6dRC8T)HpLPAwFSSxu*nV25g$9A^u+;o0*H~PCy8dwJw3MYlv z(_>>oOLu6z6YK@9yArU-3kth|Z5Z9%t096MtW_D<)Rde(p*(m|og8j7hnmWxZgA(! 
zmEEgF=BiMUb5M;^R>iIg229M=dXWXPHCL%t4~*>XUCHWV1|Wu+4&fS_#W&z#X#<+VR^77=ni%Zs3M6&`d3JrA zs(Ck%;83HZQk|+D1Yuod&1r%S-QSm+RMp9$g9k<%0oPq&hpN;h0ka;w1tw*I5)sTn z)(ut$4uHD0K43sxd$-KrKJ>IXDLo>uJG=R_+Nh1>xeeT8J+`I{k&Js`(ek}w@yXGl zL;0Qwb~;uNdq?b{8Z4QPZ0M3yd>~cPzFr1E(1#MbqeI!iO6rYK%y+G&qf3|p!ltLH zwS)C$6%(f+6hc%Q9gxwnDM5h|NjNz0?R_$M7=rdTD4ObMW4gL`XlfKHRPqWsq>W9< z3}OPf+0j-}hSwX_k%L2q?TJi=V52M23kYJP($;1}4^iUK_e9C9PmKmr0;9*KF!E>y z!j;C6odpi6goWE7PGh8h{i@Xv5AA-Aeq`##sJHd>tnO%shUdUHhUdnlOYIQ9Mo)Ek zs3DZLLt>~jZ4yA%u0{$ls~}dCEEFG`nQn;Oqj|^*a2!)l=0ju4)@_@$ zHE_MLzN0!LR8=C~Q49jr2SR&6+Q%k_Fp6jde1f<`b63aUH3bYP64daR)Y%D2F$3UG zR40bVT<4=KjZ9DPtD`XI>gp&-ux;QxdW{hhreb8q(0$YvgsOUAw%!yLsa?S|#sI`T zV6Du{S`D)oNun_E4d#xq+AcCr1fn`JF|Dlb5LP1-I4A>%JY6d9b5LI>h1NTJDniSg zajX)=&1PRkC9OKgA!Xy>d=i$9sM5N?Crb8I%$Az~&<#51U7gUEKBftwxR=D|i)EK=|4?sZBeRVuYKW&}}US>=H8c9fOz`an&m{XdP6ja(_>@apt z3gV#0sl(ybLM8>PIdm;UzPhWsr)7NCY27X38-`NmUPqjdVdUbVb9&3zv2P>`;|2lj z44SD^G?S=;)$p37dv4Hs+M^^Cg zVnY#O77{fyDjFMER5P)1I$lyYd4&eEIL^ofo&R} zo{bip%7!u&lQCFBY5*(ZOT(YQ(C*X9D3i=wZH(4I3M?Eo2pALe6BJ!-Xi6ov67*q3 z^dYi=Ur}w%)*6wmt5!!TR%A80LKI}SHPa>L5JTAosU>C%+!l`?R9AvR?D*)w!v;Mr~ANh$Nd3Hkz}| z>3!9SA^Ic8FFId3In)?w)+cLNrHF#Apnj!qC&x6Uv+$Fa&k!g zKR|)wkr$NeFiG0r-avrhhht{WLe$ZrPN1l0=>ovx24AY;byEj}h#m+bb59O8rw@$e zlOT0WIxy0lo*d34t%pc4X`;I_Wc1~vZ(S5V_*IgX&g;-b%-of8Z92pQr>N3ffyoBN zwI+0MjZ-Lzyw0J?;n`Ly+B=~Kl?;qV3mxs9LDyluA*qcVE$x-SumdAVZ6UP13#?Nn zuq+F+Y2fgy+b0sxh56v-=X&wyM3BHwnCQu&2}psI#f-`4 z6p16-nN^xw-$6izmS8nFoM;)-tfM*14C8%kTwuYmi3Y%(LEFFGp7*s|_!VBxvsiga>l zWN&>6F|(Ljtzre$BYtN)RgJ7DX(_66J^{R?CgK8(1=_n~3ECDqk)c3J0MSU0KmrO> zp|={Y7+A3q?a{-PhFa6XBBpa4%l0}uVtZf&I}*#K1k8UV!~$AhO2zD9ed}Vm)^sig zm_Z0ZgTdtnhD!oe%(b`l)TJm>Q0Gi@cC;Rr@a^r9i&&R4)TBLchph+<$wf%wfTZz= zu>iW@Qbd?EQyqwxVbJY;xS|59YymM6vtYHDYKJ{ddycLud2KVNm zDrKF$9=v3SvCf`LMLBLVs+0~X$&nJi!`%wY`Sn}}YEe-H?(PUE-^(hTz0Bc+t8%3z0u}>H! zi#HW{8SE+&>E@FQ?X-hEW224gaJ>n03{1|RVQ~r+{1>4N{!p$4jw!7FnOadM0};dp zgH6enVHDdD7(~Me5gWAv zjIq>?Sq5yJop3{9o505TRSEJ(f%X+EZ>fohPBp6>Lf;Ubsy0a^I@PVhh^Y&#>H_OI z8Hs7HJe!g8>N&wM=H}2KPiAJ2&|N8iK!xNA|x>_wHM?q(wS>8NL?H}sWr72L@&~jZ<4Ei zbjK&UHkCh0Bb2dfN=Tb>2Et6v_ylDNY6Bj@Kr5ge37z)|CNNQZX%P)RQ^BZOYtAB= z%$d~QDY7mprldPX(j|bCGZt3#n!o5(oY;rtkDXoZ%hA`Y&OH4sel zrEK(QL`M^aqCkV2fP_S1(4jKk!SbO|63)hfM&LeIk&c}B8%S^!Dg7t`wDpvfv!Y<{ zr&Iou5|bD2At)0R33`(%jP^KnYX&uf z*Ci>5*$i_q1zIzkt&L;*sTO86*g;`ZXU?oPSwbbCW_xmaRQElIR`SI$oN4Z5Lqs(Ni^3Wpc0_~>>0B%;RG1ufAqS00<`0&7$#_Uox zg8;0NK>)2KdOXw=C8_m-K@VJS$gTYiQ;;in ztytz_?61(4s~s$R;M#o=Z6Zugu41(bnWeD_dMmyKSqVPiD7q5T09T4+P8XOY4WJ6g z9A=NB$+g=9cR%FTiYaTX7);WNA$GCf)0pCM;&laq>LPb~gab0IAr0#Ph9Cl0I_CW* zftx)LeC^%R+OL&3%P+m`4kFhbMXoC+a+=w5xck>iUapKb5(Kb23Lq-ULP$~7WCU3ydan@>$`DV)qw$N9(~Z>V_&;$z=3ZbJH=RCaa9u zS`7EZ6~=@sDHO=gX(|aF>r1+|$%HQ|av*!DSf9jJqx&9O)o3Kc8+jtqp83q7862Ac zIRl4e>1hs(ZXwI63rXI!k!|HAI5Roa+>45!>j+W($yV`PPaSfHFqjXB=uTKzYe*!P zn^!BA zA~rGGB?qWgdkc()>46F6cDB}(btZaBB=%Auac1y9mgDg%~_hZ3oUBOJPRbxuM zR;%@%(Z1Rcjs39XZRz*H=^2=NmjQc$^@F_~t6L;SqCHp{V}R+MvVR~PYeLF=TwBv! 
z*HHlm!B&ZN0~IBoC9j^I;_d*7%5-U7&0T63x5j~*?FS3*nZ`nl$YSA7#v!#7*4fYe)x_=Jb z;@xu~7w?@jXR0ymcLKTezInBI_3Ye44GtX^2Xs9^68YQF!!s5VD5uwvS4Anl&ScpEQ5) z%1~|!sz|yk)0Eo;KstV@1jH74S!r&HL#s%0oLbn<2)&U8Q*Su8kqcHcFDVk;Zj};| zpO}JxYjkW8n^p4w&m>n(lt))a=Sqhdl(|Zyj^EU$L8?X86$#xNwzmZ-2hW(9YdsXl zc=8Jd2_32J*Uc&*z+UWc2rfpWB)85)#p8e5$5JjTermc|AB)>a;2KAsl~*-E;>)7t z*s_>ee1qpPwyct#D?+PYALfk40PzGvaIj#Df-nhTLO>*qdKG!OIz?SLZ#Fq6S^_v~ z){ZuyaiVAx5-0&wC5=e&wc~*gIT{dz1ZlF1Yo|dZQAuRMnc3-)TBDIqR&u1@P@_1b zgp6ZxV#uIK(fUlSI;M^0MvRG=fnuz%`J}2+^up6cz7=dg$ zB0-O`^JzI22aRi9@$VSIi^G!JzYN?L>KG}S0|NdYcfZ*Sh>IQ5pIyeruH0b*(jdXX_&9sIZ;M1$3e>@6-G~4!U@Ex zW$7dmx`do_M}>|~?n`?l%YzQqg~MudQL{z<&Z&uEKshnE;gHHTTa~zF3<`OZmsSa+`J3s7SP7QOLlD9x)YWaKelfkJm-SmPJ+9)Zbz01!3*|m z-m|&4i{Sa2ckS4`t+#vi)@|FW_}e+Ksdruav7@&q{@B>Rt9O0;v3u7*FNTeU26kP@ zLf9$@zjkdtrxy_*{@S#2VAoEhb;_@^H}((iKEJoUvUh_HTn|JN%?Ep9UbK|B>^0Tt~>^*~3P`7d0j$Uvy{F1b8`L*-x^W@jM)tk2US2th0 zb@%4}UK}Cfuk%4Bf325a{d=|#;8$mx{Mx;1|yp! zI6+}N8L`25p0oMFD*oTGahnV(f9}}1V>A3GP1}9$uFV_KEDCZii~7V8p69+Pzcd+8 zpRgH79KznRxQ|Sst#0p7DHo5oF%&ctL`{oA{wVvhi5$vGO4Q1B90?l{LG#2CvOlm= zY*rNUAsmeir?W5>gNU{Ob`)v{N26g(SeGx^X z{>6(Bv8|Yqtu{BBP)*{T<(!iBNMn6>qsjQCXgW+-rI5YADGkUo7)fHNI-2ml#9&B# zCTd}zWgA7JSKP^6fS`}(I4g(Vca~uF!%r$zZhhcNa5O?+d3y`|@K`w~$Y0M1? z!PzmQwYJ_MdVN@G?v!vJX9Z5<(Dt)(;4d7S^+_-#OoAD~uIGzLT$){tB+_QaQW>I$E!iVwAVWC9(6VK( zPhB-v#|a+kjO>!I=GYzD4?SCmL;bbc5o!v_DWPNJq%M(4sX9VVIF(pUCK4q?%)F{1 z0Ep+tc68@yQ2}(2OkikMD@T7^kDC}tv_RJ>^akBz0ft|7(9x~b8`R%gLV~U=0oG{= zbS{)Y-bLAi2V-ujqhi=NCw!fZaF zqNniKI?s*=!owLnU=L0wc7zjmz&d0&aR;O*2<<$ygB%`J9?2pTHKT*UW+1m5yVrQKt!GHtUKnBK)ib0(?ycbw1+5^Q{{^x>H8U}U;4_?Kqh=kBB*&g;>LRfT z$4gxWi{vL9H+5NB1c?XW^35!ks2-ET*wMTI^_Voq^p=5K4}x17$qy(`cI7kpfnBc; zL^vY+0CVloQrL}jdz(orh6(ZM7W(DLoi>f=Pi;|hyB=V7Z7*F7W_wE~maa}knr3{k zcGVnr74sS-*aU6?AST+liW`dKjO&1foJ7JWps}lgbF3yUI*P#0)@Et}4+{pbhP?tF z$N1qKp%P-l3iqmNW1`M&O`=nT0l9~;wofJCy@jY%NJ_Bz7E&Y!EU&_!zKu{-S*!H2Bnflatj$hm&VPm=j(lI%+B)@>h~>J|-7p%`DC`6L_~9`g*G$$_sD~ldvC>p$IF&i3^))>jO-)ok z90R9`$v|o{dZ#0Zz&lO?C!eJ|Sk3NeHM^tL#0JtRLd1-3VsePF>JDbJJDSaP(QG2~ z%q;X@s(4*s^SWp^*Co3N+8MQSnSCtSIg{Gxi7p6egM_)>_JqO27Utfe#$Hz>kkJ##=n*n70Z~PLWN*Iy zKtm6@D~?4E`fJV~XvS=Ejjjg~BpHn|fe*!{hSvuokccrG!2x%l7zp7B3rPjXT+u)b zJU$$p@t)86Kn+uH+S?TgGP6F)%zBX-jtBf)Ig;x#htXIcM+46-J39S>WjEIc{N8DN^I6g?B>hcL{U63$}r+MX!G5QAikE#nPBJeW3AnVcb z0X~SUrodSW3JWPgR5gXVP8}bl@R46EAteQZ2MsJ*8z~e$MRS#_CIjf9Ms-TONl9yO zg8-;^3WXmYPtaVAU^7C#I2*A)jPnWNFKE6=Rh1zdMnS5BvWyQZARDD!`vS6pmjpD9 z;WJ=};RibSNkB_v7FbM|=p+FfC$kh-hu#KPi3M~ftNVv}8T}0iV*-u=0pkN-ZIUur&SuW3H3w@=EuuaZn$d?K z(PNtO;7b8LT8|49&=|ca;;E82@X7!wWRkCmu?Ooha(M0{4k&BPF|<86O{i5Lr6Joe zdhPA%B>|tN>v~St4ij>DVukq`hU(C#Z;+q%P&x)v2)sTo#`+u8acrof0G%j#UIOM7 z38X%0+@<9qL17lKRk#LJOz`hjgMAnu_#bsWLPqZ zbWJCmC?q~`fgJ;9K}FHA4(bUpdnDzw4s}gOk1)KAiEbvtFT!JRcq9aj?^Bh{K7dc1 z9l_ndEG9UZGZL2HDa=LN>bSUp;kNM+vSpOBNmq} z9IfkfQ+SqsG(`AX1G(~O0+9ZefKGjMfk$4@Sar{)!3vr}WS$;FJZ-XP$JR|d`!{O> zy#+bFIdEu?wq_EAVq`DA&`~_8^WcqiT#`7hh5C^Kf@%JY_lR9<(&JLTLx0=_A4@6m)tf(@>|ExZgRRfyd)4ytT;V{P)_ z2NgNT0#A5w225GJ>5!NbZZ&cJ5U&UKr%>+nP#3;rV7jq15?-@j39{QIDz2e{L!0yh zmP8Am+W;+k!s}QBbVLY|ZjcbFqoc|fBo~WHBP89W(h1dt<~c%`Y=e?5^-WKBb;=gO zF-;!u*}0}Fd&IekqBDm`f66dSAEK4Kb!1Q4o{)#xX^!CxJOI7TIoxt%q%zE0|QKU|;9Q%%{isHhUhm#Qs1BJ0Lp z90s6pRM=Fc8Y3Vv?z{sguul6;v%~5WTiRvXpnm9e=#zHC*+364X-kX z-XKI(AK?yJf1O0>3c_`F(-u6wD!bs#l@+x^tO}kIRB24RsNURre(lhK=~>yMKt2ea z2oT~2*u$9Kh`VB9W~gptpw2}_h(7pri8`2EufVEIPUKePDzg)^$idLDFJYn~qb=ir zREY)Bt>U5r@Qq17#C@Xsh=@f@+_V?rMfi7U1LA{&*>PC~gvtva)=`g+&-+PlWaT6! 
zLa#Vwbj79hNpp;BNG2p2`;t89vpC4WwbU^w8ysZBbde552Ee19z(L$F@?pYo#~r5- z4gKo`f>9YK4sj4um({o6Z5ZfrIX)4rgd+B02Hedr5+4H-rtW zlom+P`fvlxTnLKD(n;5>T9AwSI1hATD)`(QB5X!!0lzp6AsI+#0pu2*%3z?Wt&JDTTVQjH2wVj^gkLOw(4$C! zW@8*Rr6^a~Ky4zI64=T)v2_am26iMofz$>dP?iYCpapR&o$wBVFGn6v{iF*#xN$?))h5ISW)uuDu#U;`;pLh+4t4Dd^KSLOPsBE?Ib zoJ4KG&qRyC2MPk)YDXa1RxDEgmz%M2Xd7dD#iGHJt9r6Ep@o<(4K?gYm|Y z$$fOXJsxfJShX=d*2EDReH@O)J4{$S8On9!gW(j1VgY&5CA#B(B~=d=%B~tIse>M-42c z!oKlX70c3TTC&@U$KrD}xiime^H3JL z90S8Tf>K7}*sMG%0jxN#_*TzP@nIp3e7vLu&vtl^j9Bo`jyKzC91DX;K@!h(w7S~* z(UzyOHR$AfkUNa2hOnk$u<_WeC^SH+7tX0oSr586PB`*-?8}>RoV#`+5x4~` z4hm0+e2WMu$ADf+zJDF>B+_d=haqA}9G6+?;s#$LsQ89R+CCU&vSp)thh$j=;2VlO zoWt>9>JZjcm&XV?Vs3jD5uBpu zMl!t`ea1j2QfUhn3NH{g*MWAt2y}bbJWPBx$-N$otVwWC@UqZk@IOf?dP;@B9_zy3 z!Os#rz$wprv8*`goW_h+D+RdHNpxCnawosNe^EcenDFS|F?r|temqd?xG6+d8_}0{ z!n3`mTQM!nk>iFSkjD-G3p>TO!(cI}50-_P`Z-~{r?p!ULXH_vIR2M)3v)Bh#&G2$ zSMt7h7wNQGG?a2D1iMF(IuL_=Qs*9*U}oU*pmR{b|~8 z;Lt@_;O|158~4q@Pl5{2a00%-d&Fc~b7YG!&;#)yvo#*pBADEqsmqnL5~skkf}s)#N8qAD z2^ok^Z%P5($01(3AXjkkKy)J>gw};zT52kxI2FId!Xn(0oiYbfxx~etZ`dFn*^5Vc z;wdZc>*RH3;;8gVqW@@aO~|6ZOhAIKsN-f<*+w@&6!}Y$7ar zuZS=Ot1|9xkHkYhKyt~0FO^%KX^)1J+=18#uNplSW3fctxnSWPOz@77LroXkiR*{O z{=#exHzM-MSS%UwhV`wK?@f^j@Ku2QnaM%p7c=T5TZ3`&bk~9riQCD z_y`7O77eRRFbdsnxn;x28(kQc9C>C36Ha5Z(^uo+aVg49ws$OY_cR2DnqiuPgWI}maa7IA z4LWcnaA!0v!EhW_oY4?tIEJv`$TV*j_gB-a(gl7BhpHqf=oS`0Lu~PgP3p7h-P~}P zvdzaFqk@#y`z3Wvk)YQ*OR_zcmEKcCdnlQQ*@(a3f5 zp+YLJoI@Ja0!jCNYS0Q&Vz$xDEv1BXwuAujyLTzWxk(`2}B?fy*Pt#_IOo3&3F0Z&HlU9o6_C zAFALCqvq7;gIuRZY{Sshr{FYh@M7lSIu(d)uGY3}*@`QrcMa~YZajM{6g7VjUNVTM zQ5D?XziH#PZFszr3H_V5Y}~U=vugUbjiDr;zl@bjPh_ReHlVc7s0=Y$l~4{uQs+06 zcG!0rn!t0|lYGD)9mQcG$N^3mMZw@nMU?FB!@UyBgo%inKoz_|EL_63tpl|yQW$;F z=iQ`bwNrT124;!`?L0q_j0bY7yDz~bs+;lTtt7|GibI0)z>P6f#9 zV}@|=J1W4SrybRJKe1G@=%NK-QHZMuQ#B~SQ5HtQRhy*S2?W@hYQ+V>@otT_KJr+y zfXBidi(9~YYKl6o5EhBIpx2P476$#p>uP8)GKFpxCJ{6E{&4;n1Kf)q}7jx<+| z&7pCho}IIK_m(|7Hi0JO;3UPDgwya1&Rf)N(C4FIBVD+RZ}8OesUN$W$&^)G1O%Uw zKhV82YVGmnZY1Db4P=|Ofo+)6F}N&p5|3BO4~?~(n2FHE(?EKf0FJ+$4eHBU=#&nD z0gbL>IE{j5+|pGNI>{JBuZ=eJgebae6D^#8C8eT=0Pq-%#!=!)5vJO#L(n`tl@rjq zmf?e3qkO(gvB6KlgJ2S4PA*4S#0}gAq)qVF76SB=fo%N(kbG!y2AMhJ&F+*tLi-=Xrt~bT0p!e?480H$*HK| zGz}36y4V{?ToRHqctsU@29|Mwg`2uF*;5~ArsyFH1!?4yyN#Y>*zWp`hxfm6Setw2 zcp!mfizXw0DG=Sl*6|2l;SwVl7L2eif&F0w!*bEaDx!6rh-1qm`z%lh%cCck5xS5R z^G3rm5f3m7O$LJ#Dhh}2A)fWx1YYvh;04j3g8CS0R97oeqxZE4l6r&ib4Sy^RNm- zle)}B_mAK7|Bgj|77MsqBp5mxLKquJP)}coBcTP}2r$nn*o&WY8c!4iu5^(OZ! 
z7Kv6>59dXTo43aB&BSRE_VYV`x;8j6x__CGRSXT5`1HV!H~8(mfNS zl<=9>1E7Rfi6RLE))i_ zEluTSPop`W6JOQgHVRElw~!vX;%h=HYM^asad2{WJLH?2xF0TU( z<_Ayt;_!_LX{rmhBGeT+kxx>=vp&=#d)bPNJC^aO1UnW>$S8 z=do&wnHxn=6>VmxVzaTy&>e6w=<29HRc!<=21iieZy8xjDWJ6BCUig{CwJ;i^?Qh>{BLD0MuUUpv?H|8Kx{YVw$`UMhOiKN*rd3GE{;sO{>H`8 zVy3wot?2o++6;D=qYBaZAvzHzo!Z)&z?FT-oOFtrLg9#{S+$`ao0o_HhpjyNNojG% zcQBJXV2+4CU;35hGOPCG9$ik2sx8auV@1WsvPFmB3H_iS0;e?3Iko25auEZ_*74TE znoeC(D{ImWRRXQQdUj$LHM>zOS!811e7Q9-7%nfeMxX%5KGX!)Q05QDQIJv=o`{y6 znwG6}QUKpUaUx}Sw_VGG)Wj{As%OKgzzQW9b#a`GB6>)$Rl?s91mJ_DgYlBjNZH!Z zDK|JbOaVf5GP*RgR+lyk1)Ca+p1Cj$qTZMtN%=)tMs})d0}s%2EG1$RN=2FWUY;ua zI5$a3RMA!K3-mL%9LAWhv(?tn1X5&`CfT-N5M=cxR4Av@r(nczyt(bgU|)yD5t@rjx3Vf6imu_KpM*y zo0O5Yv4WO`EFDb_i(9IgfW#s*1H4a8Tg5$&b9h-!BfLN|%m2a+6M8qggluWr$Urep zq5mO#iD*VsEHpS|ZJ2-4e)HOfK+zg3wIYFpDtQ{l!-D7$g zE`N^>?#xh~`Y!M_EKI^LijO6!5A7S+cOc2fRODvJ#%C$5=aI2$@id3s+Z1}6uzF=v zK-VXx*@~8|@5K>_mS0hAYx&>=N4pysXNx76$?F2+lCH<`Nf9Hqg@a zFe@J{ttOjju%}hsb5pYqQ$imq4DI$qICP1XG~?FQjg5j}N60(aPV2zsV6kSButxafs=iP^I&x7|NmLgp zq?bi0I9kmajE&YO>{IR?II57KHz3P84p`kTQ7z|4XRz6n>GTqH(2`<`sJc`_D>C$c zsfr?DE!CKLISPKMs?@wJl`?Q>GY%GRoE_iHck1*ift(o<9G63xH~@>+1IH(p&WX!} z)(kA)Kn3bO2lXR;95k+p0t^06N#`sdM+_qnL%q2C7$CYwo z{gl>_wuDUjJ3h-Rjl{c2fgs(_SVX+nh{cz@*fyz>D#f z@RY$|=!^wr7U5&irzOG70Z%J0;Z5DovlLG@=v7lfCp1bfqnwNi^9_7Q;1~zn(QhaV z-1rj`a!o;Tq#Cdl9M~Eiwj5V;Vum45t_II5moq17++Y^|hny`VE~22bI?1^?E{jD+ zlXA`FO5$pSET5(bY_VY?C$r!hW5Pjo8OL=~h+n$}B$8Z`1n*+(p zgfmxo+*BTikW-u@Meo;r`E-hvLJA8IEIy zbFpez5kQgKy~fndl;g2@oCwO+Jsc}od9pDi=-JgVJ@OtGP2d!5VjrGbU3A=CkF9Eo z`m@1W$IApmp~c73^?Efl#WQjUgMbm@1n^8Y=(Q(h{gb5OpaN+150ZvM3S$ylK$Ik1 zof3{Ij8*G$1AiFjaB(1*Oz{bvLGwBF(9U

$9&oy#OY+f&Tp2^9?DwKaK~3TTUx?gqO(2QJk6J>rdn=8$=ZJxQrayrDP(EhdH2P z4@Jep>o8aYyhc@RxVXXeKpPBGs?O&g9L;J@HFvc`F^H|@9!c=XSjysO*bCfkW4DC_ zqTVqCWkM`PE4MStv9&Nh!Bd^V*3Ik)Go@C5vua|?z(wh{-r!Vh6VIja=Acwqk?O%k zDa}wioMM;5{A@txF$vTuwhnvLsq>LW>wH9b;o=Ll`4ndrZ&JWILFwn=Cr}EpKp+e& z14eCS(I7Dk60Ogn2@a2{%*t`WkjH+9%{vRs=pm+T2CkTh61N26+`ycGLzK3QAgZ=` z?6H~XH%6l8Dt=F2D*W~P_H-#5iO9983Zqkb>>uL+ML;hrN97#o;pPsHoh|LRB7~R0Ohf4 zni-O@*Ub{t2(Jz(=`bcIn3ZPvIkl#^T3f>TJ}3WigM`I5A4D8tv?{8vx|K&6qkPa@ z>3td|C*1#wN(x(KQG9qABc?N!JxzO9BEzH!$dL)G0VHaWbYvcpnlkaNa@I)5bW5y9 zBd8L5Vu%UUs$xsslShYw+GMhXWGy)cZ4H4e?je~N#DyNjOEJ_OVIJs9Dnrk;3o`52 z1)H^r?TGt^qDiyBKg5Fpd|QKv7?h8i=`5WCjSVIGT>&cR(TS?O!_wkKTvmpbG+FNi zvGj0ULshHhXdH53FI}Q*v<9qB@4&MfXkldSAGj?O(G4q0i9TXRd9>Y%(uteKgDkAU zkg_A+f*U@>D;QE1JykO8P>IJf0Wt33VFyycWawB4(7@PN8Jrn__*o;^ zm5CxHneZ~OO}p?Af@R=5G9l3{{hRYyLRgNwrhM>H2E)q3kjj3nns*&2gZci-xYRfY z0>L9>X_e4!>%E{%WDcjeiDHS&Ed_a;%T*%I7ffCl20l|Gi}jrQg7bltM=&%#2a-Yn zxs@fJ_76E_v{J78>ajiCYZwOz<8KY;G+$5Lm0p60sYq@S+0hmQ7Qs}7BMPuogL>>z z9`VBvikeI+8Wdx;;lT>1x}>W?bS&1o`{0XOCQL0{T_cJSeuV6Rf^%fB?8GWeyl7(J z^-f?YIX%n6lFS}o3AHFe*TyLu}q1=!)xm9I2L>5PcRQ1abi1h(Y0| zvYL-+&~I&Yct=hSnBWvAF3rP1cUYypcs^8%K$ckqf{Le5MNxC@+tVjS;QL|0h36(% zh>zw-D_lLG2&`!%cLX#XW5MEv>l+r~ZLwO0CsR=dYHNuySX`zua(%26qJ}^rIkLG# zL!c*>(ib^vF~~lPsnfKGCLU>zK9TaSI-}*36$~pb>Z52e*PNa-(qhUEzPV~qF)C`iaO4f1uNDf(48zGKLD3UgXmnNF08EEoQ#qnVK)6Z}y_A3xz1O&`An9 zA#PMk4Kt}e5DBFo&)%YntS-MuKYrk%x~4jz$QQ;4s(q0%MsE?77VQ}r?FrE&^M67; zpd;DkfLpx3EaR9zuB2nzs@+2kEgm}5P1H(Z2N-e=Zx+EQ-+UTCIbcGj(W-D$NknQ@ zc2EEyhvnXVa|o~y(FRcI+4)MDgo@xyYGD9pAEhw3yEk7rC?`eZ0gJ>c8Cq71HTK@c z^|^#xv2&NGAHI~Ja~Z9ENzIK?peQEfis1oS&r$i=l3`I@L89xVBA0h-zB2jQl3~fp zFdw!qs!VQTO#G9dxys}c$CM!sDF$0oa(TDqdnP|yGA!9M4F0x7J;TeNJkJ7qvD}l< zmv6ENfaJ`zW5Xn=xi+s_)X)V>HiX5CG$h|_2!P~Vx*<)rLSIO?p!ei!umc#-yih&)3PQ{#S}|_CxvohR&4P6LRAgxG>U_@J zTK(#L8<*y@hTw+OBk^b5XwRdwOY?Q;(US@TCi&WU&Q{iP1Nj0m;3FGv+#==Uf!pHGW z>h0L^K}I+vb9llqUf#z&0D0&YTL67}&ylzu;xi$$ZZGv^Qh_+t!4Jn&+=B4`rcjYA zyB*3>g>h8wLaFv6kgk8mBzsUimIa>I__{4i0cpaSfjk*A_ zCbAR0BK826yu|^fA@vB~0D!!P@4hzw!LQDB_UY?u>sx%)1Bzqv%4!5x9Y}*Zd~rIG zlky}tzQECRqXK^GhBtt&Rj7RNjSrBJz)38I^*LHBFz{0D@Q!ADgA-#E<|Xf0K+Ram zwO)o1^@t2I*Xyhg%xLKvh>3SSNIzOSAd?&kp#DL3;5WJd1(D&&$!T6>g-d=B^Qa-Z zAp#mMaOWh#Y?P?3_EQF77QP4H2B9p_wfa!N$t_kg<8+-X0fRkTckJ!~ODYg@Q4i1$ zgN6gOqb|Oo9+)rSuo6c`EC84fri%GA{(#xVd;y1{XN8&%CW`quZGeO~ z$BYrF+6unpmTfyX#^N#qP#!8Db2uYwK*wWA}2e$`4irlgoz>(1UHHIIA{k{as|0}dM=&?3Eg7x$yp?T=y?yCj(pq$6bBNeNRzs79)A-^9hjTz;^0Xe|H8O#PVz=}T z7EbaxC@3X{_t%jaK7|1cyq<8jr)o1Woa;d$o>D=5V7(I;**T$N<4GS1_f*W6SAep8 zBl++?t{$>4_JIe1*oWZ>e@9@rmxP*}3r!V~a4AW+;t72T{A9!%*TT|qZuZ8vg!0f8 z*hRG(-w`UZNpXS6p?EU(-Dq#C{C7%>tenYLbTi6JixlY@BamlOgxqqxDJM2i;9_08 zoW%De;l7c|!$ThBQF+6NMdVatB#p_GFhR2s(Q%dp&qJQiad?+3OpE7&Ng_ER}&h zq0@~fmj2p;xrEpV9F>CwLQAy!b#BkA1%<|=+Q3D9xS?=r1XdV1G37iJII@eyt(x18 zwzj@1uF}P|*Ld;*CTe`zUI-Fy%(P@o(jmU>2p)y30Pu~$-KUjND%o-s*Exfh_|%vw zDM)o~XiAk=*sb86f$BRjEb6WpyFzXDGGm@d1hcErDGKKit213<6`4_QOu|vMT20^4 zh0X=7%CXj)ld2>8n+&cgucC$mGU!1(6)iRoya_ia$rcj{hTtHa(H4f27>Y_qTMu4p zf_=_Wks?-XVu;{bb9$fL?kOy@i<|M}8V*U|(*D?Li;%_J+FJ1Fi+$fCKAlaISXm0Siqa%3z_K&z_SKpAYF2Xl&&>i8{Hwf zCx@HU2N2!#Nsw+(LUjFd1Hx)m6Qlc!-du&Pvc7(&y zaKmH)+7Xwx;Qd>xE=Zk+ek!7(q$(N;aMRFh1QhPV^sictgxOjvsTXg`%RChr=`ZInv~+7Jzm>{P^EJ&;cvD}gpp>hcZ-91-M`c?qr2Q9;f=?E1xB z+Un3a0&C+OhB3I~SPt?W&Hz1gEV%Tz5@4)*84r#THSF3-UM{+405Vj`3qP@a3vtfk zbG#B!(vS~xvBq#Gp8P~17yRKPPf{SQ-t&}XlBet)kFCl+J3BTuB^%WAa6^POiboa#9>Q4;qsBUB zw1d(O?K!HD{-E2Th%hx32{cLD;UqCH%Eb{3JkT#a!EnUr7B|N=-iz@?-~!fThVkOa z7Cz366x6P4k+uS5Doqw_R*4EfL%lruEH%LG&v*=3ecV`Q?1d17kz#J)T`qT|&&C=T 
z!deV0O0sWPJQs`rSY+*^_FQYA$Xa8|aduFS(v3eDGA}(ru%e`}ko0BgA3Vo*d>+fXQ;04+8Vk z9Oho;d8DuB?}3@mVg8wU9_91=JM-i)$6=rjpXl>EmP3b6AzPjmS;G>aRV;-x$s_6O zfjN@HoD0kb{G~is5Ci#1m|0>V2?_HuV2)q}6Xp%ToQP3Qm_GyNa1QfPV3uQ)Q=TsZ zGoQo!BQQrW?kUgxtOsM7FejpSZ5XeFc^WVSIZO{QH|8+gftknXq*BiTW&vDHm?khY zkMWr6ff;zL$J`7|+v6jqf#*5v2iLTNh>SBBiTL~#zE1q@n1`5;19Qdic+6LTS$>Mg z{1})G%Oj>5E~>imsS(3^jzc3qe459c1k4pHJZ1&+; zm=jO+rLG3%|10i$prpL2Jiksb6Ahb`iX%E0OT!p;@_RW<3(Fiz{>E-LP6XE+geiBl@cV#4leH$-uQ*)}pODoNT5CLCpU`s{K> zvcqJ1X4w_bka2(ay?5XDZrvs!p1Awvgs$)R-S>O<-gn=9f4;BkOFIP3g5z^QGB;Zp zi+l;lm_WV;B)cf&c^pXP{b8v|;#nNZJO?CN5+W}Wk3b?s<)lE~4rE3kOMs-7g*+WV z@&dUBNH!D7d<@9AKz0$&EuqY3fK1*NB3}eDlMRu>Kq|L~$T1-K&JcM9$hbiM3dp2D z{sBm8O<3xeK&CzvBG;iu=GTSD+kuR&50RUJO#WVov;&DYhR9kVDUX;R6j{1Fnh!BF zK!hO0aSvoBFw`V{XMmUm@^K*PO`*(ZNk$+K0*P{=%zpwhCXnxu%|eK=xi1_2f5 z^8#eDUIv$$WPIh5P2t%ia;_z8h3>t@RDt9?f_Zzbj>;<@Ar1c!GUFe%rEG8d43M#pgggyG@*(mKAgP`Z`7V$~Aw-@d znH?eWUxB0@v#smrB;%MZ@)o4EoQnjD+yrFOjUZP=OK~5Cte^uRMxuX%I6qm=TrZ8DOO$E-5Hbs&&3Pyn7>XRmYVM+T8QHG z>Xz$nh$1w5aPWNW-vYDqF4md-`?QzFYg zOu=*3hXcxb_Tf_~miW+pO);U0K8H&3Zw==y4+ELT2y3h`l8=*&K)ypVZw_Tn0vQv? zPk}^l3uXQ%kTiQ$X>67HC1mnWhHS&c3%+g$WC4)Lk8umeAF^NB%OT7uZ`fA888Tx6 z`2djA&QN9zG2hQTRM+nVN&i8Jd<4k2K>CSiS12Q$>-6EZ#Pi5?EiRrLE28K|$>VYY&7W;N(;N!Jc5rY!Q&zI^?7rpGv_|_USehSm z-ndMR&Bg3>oMlWM?*cOM30um>&tf2Be;gvKfJD1PD!`O^@2 z2FRE|{u;=HK>iNMj6hxnlHJEVvH6|?N`(k^%TeV6UdDXw0Hs32$c%r%%b0H$pr3k# zlztP|LzyoIGPV?kv}A5G4>HppiOtsqFcST{Q07L+OdMbYGKMGhB}S;OMUcsRBsO0U zfIc1}ndOj)8e9r8hG*={jF8MK$V_@9Hs1_DKKx23vlcSBuQCD|!_zp(2=U|~GwqSs zd?^4vd?=K;2Qrn18G(%9nfdpO5Kj>@nL~`k=KBEXxgH^zGGr#d7RVT$?4yj3%m`%0 zJQADd{h^^pNamA}sr&~n1sTgT!3fFhh0LT!V)HmZc)lLWJU~2;F#;LO^9@Fbr$Ia( ziOo~`;Q1!Y05PR1M;W0~kDydTAV+{?zQr=Jc|ITB4-vyN?GfU65~ZR)=Td~605a~8 z*gU$As0$H1s({{9T0cex&_>No4-e<0%Hjrh_wowu3YOF0{-YLW(p0 z6h;|T0)75~61O-w^SArVuO3l1@@HV_jI;eJVNpW&B4lznO|tq0AgSZlhlzhI59u=l zX@~>|`={{^_D^upePsXmx>_#ppA!$Ue-6yQ-1=gTx1Dc`$#L&$jNjAFirGeCQtKU% zsr*H=rM47VO1pQ`ZLh`BJZdmrl@wHQ#F5ZA%$9az0`LZNjsM|RzQF*!i~-3AJ%w%F^JLf4|BU|SgInh# zLS{bpo|A)R@uPKLj~<~?&qC%2f&3K6_XP4Y zAW7|1>hFP^7BX`XKW{wAR*=l~Kw^QsQz7pI@-`u}9LReF@=UUyZp>G1K5BG;GCSC6Tr{*Lmc!-k7hLCzQ^V zX+>Ra>#o2X*Qyq5D4jQ`796>P7CfhP9#hPV74tdtgje(P2Bq^^#XPQ}^ zK3uUt`56Pglxf?BBCp20Q0aV3>D;GuUT_6EUs5{nQ#xl=Y)oAH>N+1)%sIt;NqHmx z8?VkhSH;Fb#hg<*&$)tljubPkGen!Vb}Hr*SKy7bYQdwb1&gW$>Aty)jhxD&8O3}; zG1nDyw_?sK=98)gi^>}*<&6o|f)&MFRDQmsm=7uD^D2vuDdrKyoKoI6rkFRVEIO{3 z>xy}7c1uDin*egvnnvD?upwfAj()payxliewSIiTNd7Wau zpqLLS=89rIs+e<%`I2Hjt(eCY^C`tVpqS^YK3Gw`dt5Qo{nMsCsF;r{op-CA(ooF1 z74rhcd_plFRLm2K`G|_T4Jzs`s;JAUsGC&Gi&Uf<&A3<^AY8Z zg~}V#ig`dW&r!^@FDzQQjqU@k-SG4E5} zSggEpT6yD;@Tv5!I6!SjCoK?*0u5b49 zm}1_q{G3sKKBN46T={uiF)vUV>4?%fuXH}6bZ#i-`HFc$F>h1M#}xB~VxCrh9#PB- z74uQmf|H8*xMJR>n6FjL^Az)xVs0qrcEx;FWl?&%SoXnb#XPBEW2=gdIf{8q<+>Tg zoK-rXR?NE<^CC54jF1o^mh335oKQNiQ_L4sJhv;IQ;K<9>AXn!`IOT6tQxJ-ius~q z9#_n(RBW`Wc+M#11B$s-F;6R<+f{6274wOdj~~0cyCSdrJXbL{l%I2od7sjGvC{dp zVvdx~dBwa-={!&AJgH)%Rpq*hVm_^yD~fr6()ptD#+YL6R^B+TnD;B@-KwXIE9SFG z=Us|-so1|pj6{> zrj7ZwifX|r#k@!9oK`xYRLsXz-k4U*4W)Ct()p~4=kqF_XB2Zv>3m2rcPpLgNmDtd zXyz%UbDv_KrVx}L9~@K6XB2Z?F{hQzb5(4NE9M2t8%Gp#UNLV| z-e@T1^NM-5YQc!?0bEoq2lwv-tm^&5ojM90(8eyju^C{(x0p*SPiusJn{4n6FjLoig}J=KA@QC8BEFCr$*Ru z#oVgAaZ>qtM0w+k9#a(ae#M+oI-gO@^o@AwjS0oPPI+TmG4EC0m{NY8RLp0U&bt)z zBE@`K<&7!Dyh`bON->Wq=KV_NX~o>Cm`^I^5yf0l%rlBPtC&wK=G}_vN=Du zE9NQ1yh}0fR?I2Iyim2^F%{2!D(d#Acup(kv|>K0n2U5ej}A_xB8&2mUloS zhy=&4W_vbZuphauDN}p6le|IAcFr|tlGc47kz?fve5F8+JJX7}Q!!5~=Dmt}LNU)M z=5>nstYRk5OP%Q#9Hh?lb#@AZY`uykgEM=0%G6xMHp-=52~OtC(9A^OR!VrI>ds=A2?KDrV}$A`)5;E9R7H 
z!MtMLrkF1(oewJJi%RE;V$Lh(ON#lBVxFUP9#hN<74xKG9#G6VrSrIA&M4-Sig`pa zk0|B_Gq+G1$&~zBvR?zGO@5XXK=G`;T|2BCVVzq@QVKem6>~XSE-%x16s1i>U0$Yb z5w}hY2E?X=v+)Gm?loM+8ZxrtwDRh1uAODt_9?GUD&~2Lc|tL7Q_S-`Gnr7p;522J z@rw%5eRSD>C&J3mh=%1!)A72ssrz2Qw6b)8vh=93vvVAL9r0v&w+o^(2!qTi^ ze&trSGGu8Eu`u81X8*hvOR2Q+a|Lla}^*OPRT$nCB|y zLyEauF<*oUwk|T^i}0akp52Ee^9c1OQ#)jQ-|_mi+yxmVqu_WgUd?;21_bUS+a8^6 z_Ue3P`w?Y(9xT$U3zSz=iaDp4&nxEriusgcPAlei#e7OJk16K8iaDd0vx@nQV&0>e z4|--?ivEInlVP4c61Xj_4D}`yZ#rJDb|>2Bp-=7#^szF(etmuheezeJkCplL>vIwM zRIWfDEA#8shht;R=!3d=v|5=~k$Fr-W*=Hx_S029GMRZiVRpaw<-cq1f62#Z5?^~! z?MKb{?C0iDtp}R*X(P`%ecbn;pFt`6{apIpTN(6y^!ivp3D1st-*EX}jKWD>ZVu33 zD`uOMG}|?sjQe#RZ|Q6@b`H6G5qq^|9zHeYXSa`io;5V{LunWLXFvOAHW}ximtMg? z+=BlPZEtP7lFrWd3$Jeb%07;t7HkDK9seKfT23m6p0eT~TgoxtnroI>PVFyCrFiY& zOC4*LX+6@6thlzh)XIV8-rc&dS)Uc>*w0+o${c$YHQ*cJ;K(`|6vlBg5Z>#y$nOG4 zKTC4}nt{-<3CP%d8`l;o13Bn4vKx=BTk5|6u}`8>U2&~a z#n^7(^}-SK=>YqMd-Oy_Pj-o0^x5)Tjcvd_}9HG@&XY1q!ZWv zcR*6lgqFSxgzpd9Qg1;EANN*}&hH0epOs}Ex(ksLUcCdz9#N_UWI*_N3<%$0v32bS z!Yf3JJOqReN{$ve0wg6`=V>7Jdt|K7G!R~Yq1}rE^n2?iyHmk3zkp1)(D3@Vq7wJ3 zIJT}d5c`Axms$y=F09xLgul^jOZ5Wj7BYVX#QyRC^L!piggE=IJC4Ah`rCq8g>K832*cQSto4&1dz&eA7pu%Vgkc^0pERYeQ5B;tW9Z4VD4rHFN>wX}U0{I+}jHv5TAoRpt zV(GVm@b0&@;s-#EiBi)*D#EKXK*od*-+*7q;=2`==dD2aHw`SZ6v&upuipbQBbe_2 zvRl;tF(8TUF#>TQjt!Dz$WJ(#1bG06{f0HS;3Gh?!av^zaz-G}0^uiYt4`$V>x~z3Kk|;X7Hj)Ej;ac|*kEZvvSkEL{YoA(*p3j(C|E zuP!fCMk9NNj@zOaGQ6*4>$)FEUhsSx$X3rop7{!p6T*kbfY|jZGe1oTq7!0>a;lwv zn9tsMWSPH(Ov-zen12pr66;ga#p?5>1wef5af}~}s^A4&&xd{R-H^$P7HkF55FT3% zgm?8V^G1?E3?+5l17yFii}Wc2;pYl$sn1iX(_!s@284g-#L7GdWI)vZT_7{Kn9PKq z12QFg+|PiV7G8ZB2>&9GWxnBEs9of--vV;j>x{_6+q{OV84;OFAu}fG$^yxW46qeQ zBxK5j2(R7`WS7wJ^CW{K$%kJC(&=U5*t|`?N6u2d37M2&{yq@Cb7U=j0mvoKLv{Q; zkYfV5?ncc01acz~OiKbkw*lEIWYz;27df*R2wZ6XN37mgsNv;j=4Z~6HPI&^vrX{q z142h)`@=xy`nI5JlR)_CS!>rRAUV;${uT(|tF>4+;h*P$WJM&r1Z0CK)slwMqSQPflenRlc;mN$x)TVmsjU?~ zKr+ILoj|5>G3lqffzXj4Uq~ay1oIIf{JT$<`FlXl3YotGGAWqP1DWxjk8oT9LPjOy z&JFK@>Fy~?w8i$q5K?$FDSG#NAhTH1^+6!hxQ#-(Sba7DiA1I?0O8*RvofPVCWJRW z4~wJdi~R59y46&I_IYH)OJ6{q?4WQFIO@NgtdKWZ%^x(h8*Ey+O6E1;S6NSdVQ1 zk{7MB14v3l-7X-lg85H?&=f7{m5&-E%>3U5fg4TMZ50>QWT{g1~MjO9s_br=<@?0 zbR@C*0+6gI^&*grsQqOi<050f`F*HM)P56?NR;{jkerB(E+FF~lhlD65Imm-vR5Dv z0pZ`%gE2V11*8HK=HRnMjuRqE{e(&hR6V;~jL7B2#^zp;KxUiws#)B??uO8L7&3ichWxS@$XwyE2Z3BGWWEk$Mp*GAkkfEg z5@SCCLI(=ER(=sk{x?FIe*}^f%y0P}$cSF~J|O&riEWD&Kz0j{Z3ePR@O%_VvO*+1 z_5$I%LYC(%K&GJ&omu2DApDzz7Wn~?yy&?=9PvfE*A=5y-gk{0NW<;jvEvnJ-Fx4M-yuTKX*@DIY(i$J0Qvg6AhdrUmjh zK+?V!`~qijZU{G7@OssrT3j9;++L~*s8;&eFy3%pisFt={BkWx;SJI|LMi;rJr?h^ zC|+Hy_KcE5QpiGHqGRoFxsa<@=`{tmjAb1x)%%8upx{%|gHO9hD|i=b$ZDhv>>sD{> zisCKh{-H7x)~myXdgA9Udv0qpg2qsLjwRgiK#AN0pIga9q+Gg>K~uz>H(gE|B=oy$ zPE*QyKSYui3sHK1Jt8n6tXF$>A{NL;#stHL*y*X)t3%G_CM;}8qiqPFXk0fwPnwo6 zyp0#thD|#gWL~Wt%281VnlLe{s7gaylhBZ;3l9mpunIvN2uVG`c_>yGD#LH+*frM| zs=Z`3bt=~h6AIhw5>MOnuqooAUER5sp;u?!?$Ur22GCXNEes9g_tIT2E)Vtf)cW|$ z=r%N5H+_`Bwdkd{_tZ+8wtqB=*Vn8mUBlZ+HI_ldW1GR&0&g>HK7Oc#7gP5a>_y{s zHjWa8sj37QpbRG-YA@`875YkrdjlAi%m$M*G6*@YNm!#Ki&sE5IhlH30Gm_u)Jmd~ z)SKQ|7t|Fr3Y);~!skiTFj6g5V6*G;(h&}%*T8ea7N6^-LB@NKIs2Q=YXki>7TAo4 z2~=+#4Q$V3J*G)agh8274=QsBjNIfpj6-bw?u6VWFH2r07DN;Dbzpg@e1D0Of;+cv z;PK6x!e@zbj8M(!H1$%M#xh1pU5!eeSk@ZCXJHbEy=t{Oo@wZf$sr`}-WMIk!9Wvu zC_n?pSbw{w;OgAzeL)2qN8{WmGIaqvbn)ui-TlQ9CAcUS7pM;=mpZVGQ!n{chDbES z{_6T_JC?#T^iakc+D1mV1lnEgDU|3Pix~CntJLEig)*XoHUTIi>ll?f5Y<-3TJ25| zVCC43K6u>2x|*^*jbpU0Xh-gzf&Sj|Alk*A*81-+4ehX>&Fk=-lvR+gNk^#~1hl-H$aJr|}Nw40l>6mI?z2=FB1>!oE06HU;pR4MRCj>aGo8NKEaA zDV0Ow^>=n`UbB|ZI@j2Pi@D|v91ai)8F9-Uh^Vky;I 
z1P$@goFkbU3|H`6T`9TVv9`Z7P^6`fZ4UdoSG@WYFHuK2G#9DYkUstJ!2P8n9<%2w zWM^lo2In|Y*yKN>clgg?$~2rlVVW~KFG!*)oT4wp*tKtJ9~8*fD54i)^B#MyCAZ*R5Kizb8oj{cb6 zZ4{hgd_!&wFJiN=&{r%~ENTZYx=vkSxLS(4KHPN|0t~}+Sk9}()nN9;MFKgh&d$P! z(#Syh2(L;h4h>V(;1hzgH#tN0LkQloovzr=%Y)B&c?_ur4yzYBbyO>0gg38w!6$yb z3!(gC7bjs}>Eb@S`fSla0ngRe>Lk5B_L5gAJPoz#?(A5-VFPXPIy7{nLdhe>!E|L= zVYIhc?cZ5iR#-NOhAfrQ%X*g;%!I`KS&~_L8~$rsRvQ|w7D~$o`nSU)%WBoavXS<- zW%u=$Z(Y8$E20uPg;A&?UDlU9t3*+m`gVwYM!zuOjh_RF0UOJ>Z!*$4EC)w0Cc&{fw+4Z%W25Po44HAy?*1GIM=mi zOXsF|^O_BtI^qrMySj15!dJ3pq!4emcJMsHRd0A`ak1XaJ56tOJ%D4KX?E7i_VG}o zh@Dl@5Mgy*T`qQE{9s`}R?(=PjQ*Gkz!)qpS%0HO;)XkE=^N9-;Ru(WYTXzScXn^u zJgZH2^$ZM`*rM_v+yOpHf)yH`*~J@>u*{&nrCh@xUn&xiT(&}ndiV{s~GzzV*Ni{ySenS`5*rj+)neuor z++&$GyuC1h33L<(jlkxu!R#G#r2cw$AEuJDr^8pAWzjT*R;Rf(+=@x%3R!Ku@Ps<@1l=~$%4SctA0cZTQ#mrOexI@`8uGlTHAhIwS zEcDb7Rh=d3w61hu5j)wm3{;8l#H0xzMx#K!slSBrO?e!WGz*L|hlRO?drd;XB7ypj zyny9rsk*a-E5u<&Y8osdH`w#hp=w+k-d-E6p%cYCohFVLStcIA)P#N6WXSAR8Xs|B zx#h%`j;`e{h|TgMa9$0|1B`!M8zxM?wz$xDZ>^4nLQ}m-NRSJxmi_KNrdc)iOiU` zy5{{A<9j?|NM3%#T61S-IBKcn;?;yAu#r1xU#X`ObX-J~8=PFu<5)<1z`2Oqv4fK9 zMz@ok8$~m@NqbW-yA?ybSAs@1E(6q6x|tiv4j44@ZbC-DWcEHDsAOlNu^nBBy=}vm zJHI34unN=}MyID@w(nK~2Rq|tA8HXgSYW$;!F9SQU)NI$=OAvM!}K$&kCnF%JbQ(C zyMtAxNZ(W_3|BGP3wlP9aoAS(SQihi-6)?m_++1AW17Tz|KfVNc0yKiT~hvxVg6q1c9?JFZPZOnr)(spHWqI_PChLCKQM zqvDz}?YR}@AVR`^Ht1WzR=`B6hw%kJAKP5Q9TK`7;$}7Ic&1UH_T^Jq;#b52ZIsyC zaH=z5-f}yw{me|Zsk>O!{4Pt_2kEI>)Ndxr9M~?lYyzRIlK9m$(d?s5+#x2{Hfz~C z4Q{4O^PM^kwlvT1naX`tENH24rq52KK&N_La9H-x2{bk|8$fkgW8F9z4knlB8*ZwN z(U^ug_MAh-NJm`@Vl3hkD0UxQesFFy-QLlWkz^fp4BW9YH!`e+?!j{(z~0fx=;!!s zUm{<0+MN_%h7>Yeyl5{BWkt0HN!pP_JM8^BAI_v0MHkJE5dKK}n3NXGwq>%biyO*A zMUj?>YHg`rz}*Dzf@I$4luoj*?^waG)&&k)4*OOL=Q6Bx%{~Tawy;bTVI-u5h}}}R z&CJmf`aEeL5umpD}+633b?F@ain9F85_Z@_E>x;Gr*2Hw_GyI&cQN z%y}7ews213Z3_% zmCSu;wIve5>RNL;P>Tz2_N7t}D@w z1FJTBuH$KRkWI1QV0|UVh+En)W|^B^Ni9&5pNl=j)80K$EGD0 z0!OW)W}W<+nO_$kd9aJ9#0#zlKWh@ZP<#_Gw9to`-Y44L~@+< zcbxqe(X5kU*hE7e58CYNSp&spW$SsDY4L1_-+u^HPF8wMXtb0YE6M!Vr)@h+4I_tc zm~nLEM#T)2shwZM$Yyun`Uh!66YjiWmQlnrb~^@oa7W88L8Px%*PN#~#@y{_*Qq(+ znpWtuwX1I$KkrKtwB2TOJ~JD&HZfsEB|5Iz!s7|xy;x$D25wy*_fX5zG+MfpD&!5c zWY^2{?ZjpkoN3p(Nv{nZWUpz*B@?`GcO9N_-lxILTluQ~G?~lo(&a||V9k5n+_Kg{ zGK$SjRt@OSw7cz{#?7LKP8YDeThcTl_;oPP9=*5R-9?|YXc=m6+LAWZGkKZ2f35nH(gN_%Rd0AWJm+Rf>mM1-7Uo3X0e3$4tPDNYaLE^)UZoMQ1L!mUpe>ZaRMcGm`v zh{;viWj8v*e1ZT`qYlqS#Tp?^S diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_endian.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_endian.py deleted file mode 100644 index 7de03760..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/_endian.py +++ /dev/null @@ -1,58 +0,0 @@ -#@PydevCodeAnalysisIgnore -import sys -from ctypes import * - -_array_type = type(c_int * 3) - -def _other_endian(typ): - """Return the type with the 'other' byte order. Simple types like - c_int and so on already have __ctype_be__ and __ctype_le__ - attributes which contain the types, for more complicated types - only arrays are supported. 
- """ - try: - return getattr(typ, _OTHER_ENDIAN) - except AttributeError: - if type(typ) == _array_type: - return _other_endian(typ._type_) * typ._length_ - raise TypeError("This type does not support other endian: %s" % typ) - -class _swapped_meta(type(Structure)): - def __setattr__(self, attrname, value): - if attrname == "_fields_": - fields = [] - for desc in value: - name = desc[0] - typ = desc[1] - rest = desc[2:] - fields.append((name, _other_endian(typ)) + rest) - value = fields - super(_swapped_meta, self).__setattr__(attrname, value) - -################################################################ - -# Note: The Structure metaclass checks for the *presence* (not the -# value!) of a _swapped_bytes_ attribute to determine the bit order in -# structures containing bit fields. - -if sys.byteorder == "little": - _OTHER_ENDIAN = "__ctype_be__" - - LittleEndianStructure = Structure - - class BigEndianStructure(Structure): - """Structure with big endian byte order""" - __metaclass__ = _swapped_meta - _swappedbytes_ = None - -elif sys.byteorder == "big": - _OTHER_ENDIAN = "__ctype_le__" - - BigEndianStructure = Structure - class LittleEndianStructure(Structure): - """Structure with little endian byte order""" - __metaclass__ = _swapped_meta - _swappedbytes_ = None - -else: - raise RuntimeError("Invalid byteorder") diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/ctypes-README.txt b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/ctypes-README.txt deleted file mode 100644 index bf8de1e8..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/ctypes-README.txt +++ /dev/null @@ -1,134 +0,0 @@ -(Note: this is a compiled distribution of ctypes, compiled for cygwin - to allow using the cygwin conversions directly from interpreterInfo. The tests - have been removed to reduce the added size. It is only used by PyDev on cygwin). - -Overview - - ctypes is a ffi (Foreign Function Interface) package for Python. - - It allows to call functions exposed from dlls/shared libraries and - has extensive facilities to create, access and manipulate simpole - and complicated C data types transparently from Python - in other - words: wrap libraries in pure Python. - - ctypes runs on Windows, MacOS X, Linux, Solaris, FreeBSD. It may - also run on other systems, provided that libffi supports this - platform. - - On Windows, ctypes contains (the beginning of) a COM framework - mainly targetted to use and implement custom COM interfaces. - - -News - - ctypes now uses the same code base and libffi on all platforms. - For easier installation, the libffi sources are now included in - the source distribution - no need to find, build, and install a - compatible libffi version. - - -Requirements - - ctypes 0.9 requires Python 2.3 or higher, since it makes intensive - use of the new type system. - - ctypes uses libffi, which is copyright Red Hat, Inc. Complete - license see below. - - -Installation - - Windows - - On Windows, it is the easiest to download the executable - installer for your Python version and execute this. - - Installation from source - - Separate source distributions are available for windows and - non-windows systems. Please use the .zip file for Windows (it - contains the ctypes.com framework), and use the .tar.gz file - for non-Windows systems (it contains the complete - cross-platform libffi sources). 
-
-        To install ctypes from source, unpack the distribution, enter
-        the ctypes-0.9.x source directory, and enter
-
-            python setup.py build
-
-        This will build the Python extension modules. A C compiler is
-        required. On OS X, the segment attribute live_support must be
-        defined. If your compiler doesn't know about it, upgrade or
-        set the environment variable CCASFLAGS="-Dno_live_support".
-
-        To run the supplied tests, enter
-
-            python setup.py test
-
-        To install ctypes, enter
-
-            python setup.py install --help
-
-        to see the available options, and finally
-
-            python setup.py install [options]
-
-
-        For Windows CE, a project file is provided in
-        wince\_ctypes.vcw. MS embedded Visual C 4.0 is required to
-        build the extension modules.
-
-
-Additional notes
-
-    Current version: 0.9.9.3
-
-    Homepage: http://starship.python.net/crew/theller/ctypes.html
-
-
-ctypes license
-
-    Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2006 Thomas Heller
-
-    Permission is hereby granted, free of charge, to any person
-    obtaining a copy of this software and associated documentation files
-    (the "Software"), to deal in the Software without restriction,
-    including without limitation the rights to use, copy, modify, merge,
-    publish, distribute, sublicense, and/or sell copies of the Software,
-    and to permit persons to whom the Software is furnished to do so,
-    subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-    BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-    ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-    CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-    SOFTWARE.
-
-libffi license
-
-    libffi - Copyright (c) 1996-2003 Red Hat, Inc.
-
-    Permission is hereby granted, free of charge, to any person
-    obtaining a copy of this software and associated documentation files
-    (the ``Software''), to deal in the Software without restriction,
-    including without limitation the rights to use, copy, modify, merge,
-    publish, distribute, sublicense, and/or sell copies of the Software,
-    and to permit persons to whom the Software is furnished to do so,
-    subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL CYGNUS SOLUTIONS BE LIABLE FOR
-    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
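For context, the FFI usage the removed README describes is also what the
standard-library ctypes on Python 3 provides. A minimal sketch of that call
style (an illustration, not part of the patch; it assumes a POSIX system where
the "m" math library exists, and find_library can return None elsewhere):

    import ctypes
    import ctypes.util

    # Locate the shared C math library, e.g. "libm.so.6" on Linux.
    path = ctypes.util.find_library("m")
    libm = ctypes.CDLL(path)

    # Declare the C signature so arguments and results convert correctly.
    libm.cos.argtypes = [ctypes.c_double]
    libm.cos.restype = ctypes.c_double

    print(libm.cos(0.0))  # prints 1.0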
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/.cvsignore b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/.cvsignore
deleted file mode 100644
index 0d20b648..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/.cvsignore
+++ /dev/null
@@ -1 +0,0 @@
-*.pyc
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/__init__.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/__init__.py
deleted file mode 100644
index 5621defc..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-Enough Mach-O to make your head spin.
-
-See the relevant header files in /usr/include/mach-o
-
-And also Apple's documentation.
-"""
-
-__version__ = '1.0'
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dyld.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dyld.py
deleted file mode 100644
index 85073aac..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dyld.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#@PydevCodeAnalysisIgnore
-"""
-dyld emulation
-"""
-
-import os
-from framework import framework_info
-from dylib import dylib_info
-from itertools import *
-
-__all__ = [
-    'dyld_find', 'framework_find',
-    'framework_info', 'dylib_info',
-]
-
-# These are the defaults as per man dyld(1)
-#
-DEFAULT_FRAMEWORK_FALLBACK = [
-    os.path.expanduser("~/Library/Frameworks"),
-    "/Library/Frameworks",
-    "/Network/Library/Frameworks",
-    "/System/Library/Frameworks",
-]
-
-DEFAULT_LIBRARY_FALLBACK = [
-    os.path.expanduser("~/lib"),
-    "/usr/local/lib",
-    "/lib",
-    "/usr/lib",
-]
-
-def ensure_utf8(s):
-    """Not all of PyObjC and Python understand unicode paths very well yet"""
-    if isinstance(s, unicode):
-        return s.encode('utf8')
-    return s
-
-def dyld_env(env, var):
-    if env is None:
-        env = os.environ
-    rval = env.get(var)
-    if rval is None:
-        return []
-    return rval.split(':')
-
-def dyld_image_suffix(env=None):
-    if env is None:
-        env = os.environ
-    return env.get('DYLD_IMAGE_SUFFIX')
-
-def dyld_framework_path(env=None):
-    return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
-
-def dyld_library_path(env=None):
-    return dyld_env(env, 'DYLD_LIBRARY_PATH')
-
-def dyld_fallback_framework_path(env=None):
-    return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
-
-def dyld_fallback_library_path(env=None):
-    return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
-
-def dyld_image_suffix_search(iterator, env=None):
-    """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
-    suffix = dyld_image_suffix(env)
-    if suffix is None:
-        return iterator
-    def _inject(iterator=iterator, suffix=suffix):
-        for path in iterator:
-            if path.endswith('.dylib'):
-                yield path[:-len('.dylib')] + suffix + '.dylib'
-            else:
-                yield path + suffix
-            yield path
-    return _inject()
-
-def dyld_override_search(name, env=None):
-    # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
-    # framework name, use the first file that exists in the framework
-    # path if any. If there is none go on to search the DYLD_LIBRARY_PATH
-    # if any.
-
-    framework = framework_info(name)
-
-    if framework is not None:
-        for path in dyld_framework_path(env):
-            yield os.path.join(path, framework['name'])
-
-    # If DYLD_LIBRARY_PATH is set then use the first file that exists
-    # in the path. If none use the original name.
-    for path in dyld_library_path(env):
-        yield os.path.join(path, os.path.basename(name))
-
-def dyld_executable_path_search(name, executable_path=None):
-    # If we haven't done any searching and found a library and the
-    # dylib_name starts with "@executable_path/" then construct the
-    # library name.
-    if name.startswith('@executable_path/') and executable_path is not None:
-        yield os.path.join(executable_path, name[len('@executable_path/'):])
-
-def dyld_default_search(name, env=None):
-    yield name
-
-    framework = framework_info(name)
-
-    if framework is not None:
-        fallback_framework_path = dyld_fallback_framework_path(env)
-        for path in fallback_framework_path:
-            yield os.path.join(path, framework['name'])
-
-    fallback_library_path = dyld_fallback_library_path(env)
-    for path in fallback_library_path:
-        yield os.path.join(path, os.path.basename(name))
-
-    if framework is not None and not fallback_framework_path:
-        for path in DEFAULT_FRAMEWORK_FALLBACK:
-            yield os.path.join(path, framework['name'])
-
-    if not fallback_library_path:
-        for path in DEFAULT_LIBRARY_FALLBACK:
-            yield os.path.join(path, os.path.basename(name))
-
-def dyld_find(name, executable_path=None, env=None):
-    """
-    Find a library or framework using dyld semantics
-    """
-    name = ensure_utf8(name)
-    executable_path = ensure_utf8(executable_path)
-    for path in dyld_image_suffix_search(chain(
-        dyld_override_search(name, env),
-        dyld_executable_path_search(name, executable_path),
-        dyld_default_search(name, env),
-    ), env):
-        if os.path.isfile(path):
-            return path
-    raise ValueError, "dylib %s could not be found" % (name,)
-
-def framework_find(fn, executable_path=None, env=None):
-    """
-    Find a framework using dyld semantics in a very loose manner.
-
-    Will take input such as:
-        Python
-        Python.framework
-        Python.framework/Versions/Current
-    """
-    try:
-        return dyld_find(fn, executable_path=executable_path, env=env)
-    except ValueError:
-        pass
-    fmwk_index = fn.rfind('.framework')
-    if fmwk_index == -1:
-        fmwk_index = len(fn)
-        fn += '.framework'
-    fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
-    try:
-        return dyld_find(fn, executable_path=executable_path, env=env)
-    except ValueError:
-        raise
-
-def test_dyld_find():
-    env = {}
-    assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
-    assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
-
-if __name__ == '__main__':
-    test_dyld_find()
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py
deleted file mode 100644
index aa107507..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-Generic dylib path manipulation
-"""
-
-import re
-
-__all__ = ['dylib_info']
-
-DYLIB_RE = re.compile(r"""(?x)
-(?P<location>^.*)(?:^|/)
-(?P<name>
-    (?P<shortname>\w+?)
-    (?:\.(?P<version>[^._]+))?
-    (?:_(?P<suffix>[^._]+))?
-    \.dylib$
-)
-""")
-
-def dylib_info(filename):
-    """
-    A dylib name can take one of the following four forms:
-        Location/Name.SomeVersion_Suffix.dylib
-        Location/Name.SomeVersion.dylib
-        Location/Name_Suffix.dylib
-        Location/Name.dylib
-
-    returns None if not found or a mapping equivalent to:
-        dict(
-            location='Location',
-            name='Name.SomeVersion_Suffix.dylib',
-            shortname='Name',
-            version='SomeVersion',
-            suffix='Suffix',
-        )
-
-    Note that SomeVersion and Suffix are optional and may be None
-    if not present.
- """ - is_dylib = DYLIB_RE.match(filename) - if not is_dylib: - return None - return is_dylib.groupdict() - - -def test_dylib_info(): - def d(location=None, name=None, shortname=None, version=None, suffix=None): - return dict( - location=location, - name=name, - shortname=shortname, - version=version, - suffix=suffix - ) - assert dylib_info('completely/invalid') is None - assert dylib_info('completely/invalide_debug') is None - assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo') - assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug') - assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A') - assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A') - assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug') - -if __name__ == '__main__': - test_dylib_info() diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py deleted file mode 100644 index ad6ed554..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -Generic framework path manipulation -""" - -import re - -__all__ = ['framework_info'] - -STRICT_FRAMEWORK_RE = re.compile(r"""(?x) -(?P^.*)(?:^|/) -(?P - (?P\w+).framework/ - (?:Versions/(?P[^/]+)/)? - (?P=shortname) - (?:_(?P[^_]+))? -)$ -""") - -def framework_info(filename): - """ - A framework name can take one of the following four forms: - Location/Name.framework/Versions/SomeVersion/Name_Suffix - Location/Name.framework/Versions/SomeVersion/Name - Location/Name.framework/Name_Suffix - Location/Name.framework/Name - - returns None if not found, or a mapping equivalent to: - dict( - location='Location', - name='Name.framework/Versions/SomeVersion/Name_Suffix', - shortname='Name', - version='SomeVersion', - suffix='Suffix', - ) - - Note that SomeVersion and Suffix are optional and may be None - if not present - """ - is_framework = STRICT_FRAMEWORK_RE.match(filename) - if not is_framework: - return None - return is_framework.groupdict() - -def test_framework_info(): - def d(location=None, name=None, shortname=None, version=None, suffix=None): - return dict( - location=location, - name=name, - shortname=shortname, - version=version, - suffix=suffix - ) - assert framework_info('completely/invalid') is None - assert framework_info('completely/invalid/_debug') is None - assert framework_info('P/F.framework') is None - assert framework_info('P/F.framework/_debug') is None - assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F') - assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug') - assert framework_info('P/F.framework/Versions') is None - assert framework_info('P/F.framework/Versions/A') is None - assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A') - assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug') - -if __name__ == '__main__': - test_framework_info() diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py deleted file mode 100644 index 6db0cfbb..00000000 --- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py +++ /dev/null @@ 
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py
deleted file mode 100644
index ad6ed554..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/macholib/framework.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Generic framework path manipulation
-"""
-
-import re
-
-__all__ = ['framework_info']
-
-STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
-(?P<location>^.*)(?:^|/)
-(?P<name>
-    (?P<shortname>\w+).framework/
-    (?:Versions/(?P<version>[^/]+)/)?
-    (?P=shortname)
-    (?:_(?P<suffix>[^_]+))?
-)$
-""")
-
-def framework_info(filename):
-    """
-    A framework name can take one of the following four forms:
-        Location/Name.framework/Versions/SomeVersion/Name_Suffix
-        Location/Name.framework/Versions/SomeVersion/Name
-        Location/Name.framework/Name_Suffix
-        Location/Name.framework/Name
-
-    returns None if not found, or a mapping equivalent to:
-        dict(
-            location='Location',
-            name='Name.framework/Versions/SomeVersion/Name_Suffix',
-            shortname='Name',
-            version='SomeVersion',
-            suffix='Suffix',
-        )
-
-    Note that SomeVersion and Suffix are optional and may be None
-    if not present
-    """
-    is_framework = STRICT_FRAMEWORK_RE.match(filename)
-    if not is_framework:
-        return None
-    return is_framework.groupdict()
-
-def test_framework_info():
-    def d(location=None, name=None, shortname=None, version=None, suffix=None):
-        return dict(
-            location=location,
-            name=name,
-            shortname=shortname,
-            version=version,
-            suffix=suffix
-        )
-    assert framework_info('completely/invalid') is None
-    assert framework_info('completely/invalid/_debug') is None
-    assert framework_info('P/F.framework') is None
-    assert framework_info('P/F.framework/_debug') is None
-    assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F')
-    assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug')
-    assert framework_info('P/F.framework/Versions') is None
-    assert framework_info('P/F.framework/Versions/A') is None
-    assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')
-    assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')
-
-if __name__ == '__main__':
-    test_framework_info()
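framework_info() follows the same pattern, with the (?P=shortname) backreference enforcing that the binary name matches the .framework directory name. A Python 3 illustration, regex reproduced from the deleted file:

import re

STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
    (?P<shortname>\w+).framework/
    (?:Versions/(?P<version>[^/]+)/)?
    (?P=shortname)
    (?:_(?P<suffix>[^_]+))?
)$
""")

# Versioned framework path with a suffix, as in the file's own test.
m = STRICT_FRAMEWORK_RE.match('P/F.framework/Versions/A/F_debug')
assert m is not None
assert m.groupdict() == {
    'location': 'P',
    'name': 'F.framework/Versions/A/F_debug',
    'shortname': 'F',
    'version': 'A',
    'suffix': 'debug',
}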
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py
deleted file mode 100644
index 6db0cfbb..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/ctypes/util.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#@PydevCodeAnalysisIgnore
-import sys, os
-import ctypes
-
-# find_library(name) returns the pathname of a library, or None.
-if os.name == "nt":
-    def find_library(name):
-        # See MSDN for the REAL search order.
-        for directory in os.environ['PATH'].split(os.pathsep):
-            fname = os.path.join(directory, name)
-            if os.path.exists(fname):
-                return fname
-            if fname.lower().endswith(".dll"):
-                continue
-            fname = fname + ".dll"
-            if os.path.exists(fname):
-                return fname
-        return None
-
-if os.name == "ce":
-    # search path according to MSDN:
-    # - absolute path specified by filename
-    # - The .exe launch directory
-    # - the Windows directory
-    # - ROM dll files (where are they?)
-    # - OEM specified search path: HKLM\Loader\SystemPath
-    def find_library(name):
-        return name
-
-if os.name == "posix" and sys.platform == "darwin":
-    from ctypes.macholib.dyld import dyld_find as _dyld_find
-    def find_library(name):
-        possible = ['lib%s.dylib' % name,
-                    '%s.dylib' % name,
-                    '%s.framework/%s' % (name, name)]
-        for name in possible:
-            try:
-                return _dyld_find(name)
-            except ValueError:
-                continue
-        return None
-
-elif os.name == "posix":
-    # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
-    import re, tempfile
-
-    def _findLib_gcc(name):
-        expr = '[^\(\)\s]*lib%s\.[^\(\)\s]*' % name
-        cmd = 'if type gcc &>/dev/null; then CC=gcc; else CC=cc; fi;' \
-              '$CC -Wl,-t -o /dev/null 2>&1 -l' + name
-        try:
-            fdout, outfile = tempfile.mkstemp()
-            fd = os.popen(cmd)
-            trace = fd.read()
-            err = fd.close()
-        finally:
-            try:
-                os.unlink(outfile)
-            except OSError, e:
-                import errno
-                if e.errno != errno.ENOENT:
-                    raise
-        res = re.search(expr, trace)
-        if not res:
-            return None
-        return res.group(0)
-
-    def _findLib_ld(name):
-        expr = '/[^\(\)\s]*lib%s\.[^\(\)\s]*' % name
-        res = re.search(expr, os.popen('/sbin/ldconfig -p 2>/dev/null').read())
-        if not res:
-            # Hm, this works only for libs needed by the python executable.
-            cmd = 'ldd %s 2>/dev/null' % sys.executable
-            res = re.search(expr, os.popen(cmd).read())
-            if not res:
-                return None
-        return res.group(0)
-
-    def _get_soname(f):
-        cmd = "objdump -p -j .dynamic 2>/dev/null " + f
-        res = re.search(r'\sSONAME\s+([^\s]+)', os.popen(cmd).read())
-        if not res:
-            return None
-        return res.group(1)
-
-    def find_library(name):
-        lib = _findLib_ld(name) or _findLib_gcc(name)
-        if not lib:
-            return None
-        return _get_soname(lib)
-
-################################################################
-# test code
-
-def test():
-    from ctypes import cdll
-    if os.name == "nt":
-        sys.stdout.write('%s\n' % (cdll.msvcrt,))
-        sys.stdout.write('%s\n' % (cdll.load("msvcrt"),))
-        sys.stdout.write('%s\n' % (find_library("msvcrt"),))
-
-    if os.name == "posix":
-        # find and load_version
-        sys.stdout.write('%s\n' % (find_library("m"),))
-        sys.stdout.write('%s\n' % (find_library("c"),))
-        sys.stdout.write('%s\n' % (find_library("bz2"),))
-
-        # getattr
-##        print_ cdll.m
-##        print_ cdll.bz2
-
-        # load
-        if sys.platform == "darwin":
-            sys.stdout.write('%s\n' % (cdll.LoadLibrary("libm.dylib"),))
-            sys.stdout.write('%s\n' % (cdll.LoadLibrary("libcrypto.dylib"),))
-            sys.stdout.write('%s\n' % (cdll.LoadLibrary("libSystem.dylib"),))
-            sys.stdout.write('%s\n' % (cdll.LoadLibrary("System.framework/System"),))
-        else:
-            sys.stdout.write('%s\n' % (cdll.LoadLibrary("libm.so"),))
-            sys.stdout.write('%s\n' % (cdll.LoadLibrary("libcrypt.so"),))
-            sys.stdout.write('%s\n' % (find_library("crypt"),))
-
-if __name__ == "__main__":
-    test()
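The heuristics in the deleted util.py predate ctypes shipping in the standard library; on modern Pythons the equivalent lookup is ctypes.util.find_library(). A short Python 3 usage sketch (the libm example assumes a Unix-like system where the math library is found):

import ctypes
import ctypes.util

# find_library() returns a loadable name such as 'libm.so.6', or None.
name = ctypes.util.find_library('m')
if name is not None:
    libm = ctypes.CDLL(name)
    # Declare the C signature before calling, so the result isn't
    # truncated to int.
    libm.sqrt.restype = ctypes.c_double
    libm.sqrt.argtypes = [ctypes.c_double]
    assert libm.sqrt(9.0) == 3.0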
-
-from ctypes import *
-
-DWORD = c_ulong
-WORD = c_ushort
-BYTE = c_byte
-
-ULONG = c_ulong
-LONG = c_long
-
-LARGE_INTEGER = c_longlong
-ULARGE_INTEGER = c_ulonglong
-
-
-HANDLE = c_ulong # in the header files: void *
-
-HWND = HANDLE
-HDC = HANDLE
-HMODULE = HANDLE
-HINSTANCE = HANDLE
-HRGN = HANDLE
-HTASK = HANDLE
-HKEY = HANDLE
-HPEN = HANDLE
-HGDIOBJ = HANDLE
-HMENU = HANDLE
-
-LCID = DWORD
-
-WPARAM = c_uint
-LPARAM = c_long
-
-BOOL = c_long
-VARIANT_BOOL = c_short
-
-LPCOLESTR = LPOLESTR = OLESTR = c_wchar_p
-LPCWSTR = LPWSTR = c_wchar_p
-
-LPCSTR = LPSTR = c_char_p
-
-class RECT(Structure):
-    _fields_ = [("left", c_long),
-                ("top", c_long),
-                ("right", c_long),
-                ("bottom", c_long)]
-RECTL = RECT
-
-class POINT(Structure):
-    _fields_ = [("x", c_long),
-                ("y", c_long)]
-POINTL = POINT
-
-class SIZE(Structure):
-    _fields_ = [("cx", c_long),
-                ("cy", c_long)]
-SIZEL = SIZE
-
-def RGB(red, green, blue):
-    return red + (green << 8) + (blue << 16)
-
-class FILETIME(Structure):
-    _fields_ = [("dwLowDateTime", DWORD),
-                ("dwHighDateTime", DWORD)]
-
-class MSG(Structure):
-    _fields_ = [("hWnd", HWND),
-                ("message", c_uint),
-                ("wParam", WPARAM),
-                ("lParam", LPARAM),
-                ("time", DWORD),
-                ("pt", POINT)]
-MAX_PATH = 260
-
-class WIN32_FIND_DATAA(Structure):
-    _fields_ = [("dwFileAttributes", DWORD),
-                ("ftCreationTime", FILETIME),
-                ("ftLastAccessTime", FILETIME),
-                ("ftLastWriteTime", FILETIME),
-                ("nFileSizeHigh", DWORD),
-                ("nFileSizeLow", DWORD),
-                ("dwReserved0", DWORD),
-                ("dwReserved1", DWORD),
-                ("cFileName", c_char * MAX_PATH),
-                ("cAlternameFileName", c_char * 14)]
-
-class WIN32_FIND_DATAW(Structure):
-    _fields_ = [("dwFileAttributes", DWORD),
-                ("ftCreationTime", FILETIME),
-                ("ftLastAccessTime", FILETIME),
-                ("ftLastWriteTime", FILETIME),
-                ("nFileSizeHigh", DWORD),
-                ("nFileSizeLow", DWORD),
-                ("dwReserved0", DWORD),
-                ("dwReserved1", DWORD),
-                ("cFileName", c_wchar * MAX_PATH),
-                ("cAlternameFileName", c_wchar * 14)]
diff --git a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/not_in_default_pythonpath.txt b/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/not_in_default_pythonpath.txt
deleted file mode 100644
index 24084e9d..00000000
--- a/src/debugpy/_vendored/pydevd/third_party/wrapped_for_pydev/not_in_default_pythonpath.txt
+++ /dev/null
@@ -1 +0,0 @@
-The wrapped_for_pydev folder is not in the default pythonpath... (no __init__.py file)
\ No newline at end of file
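The wintypes.py deleted above has long been superseded by the stdlib's ctypes.wintypes, but the Structure/_fields_ pattern it used is still how C structs are declared in ctypes. A tiny, portable Python 3 sketch of that pattern:

import ctypes

class POINT(ctypes.Structure):
    # _fields_ pairs each C struct member with its ctypes type, in
    # declaration order, matching the C layout.
    _fields_ = [('x', ctypes.c_long),
                ('y', ctypes.c_long)]

pt = POINT(10, 20)
assert (pt.x, pt.y) == (10, 20)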